Dataset columns (one record per file below):
- relative_path: string, 812 distinct values
- section: string, 339 distinct values
- filename: string, 2 to 61 characters
- text: string, 6 characters to 1.76M characters (the file contents)
PyTorch/Translation/GNMT/scripts/tests
tests
inference
#!/bin/bash
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

set -e

DATASET_DIR='data/wmt16_de_en'
REPO_DIR='/workspace/gnmt'
REFERENCE_FILE=$REPO_DIR/scripts/tests/reference_inference_performance

MATH=$1
if [[ ${MATH} != "fp16" && ${MATH} != "fp32" && ${MATH} != "tf32" ]]; then
    echo "Unsupported option for MATH, use either 'fp16' or 'fp32' or 'tf32'"
    exit 1
fi

BATCH_SIZE=128
BEAM_SIZE=5
PERF_TOLERANCE=0.95

GPU_NAME=`nvidia-smi --query-gpu=gpu_name --format=csv,noheader |uniq`
echo 'GPU_NAME:' ${GPU_NAME}

REFERENCE_PERF=`grep "${MATH},${BATCH_SIZE},${BEAM_SIZE},${GPU_NAME}" \
    ${REFERENCE_FILE} | \cut -f 5 -d ','`

if [ -z "${REFERENCE_PERF}" ]; then
    echo "WARNING: COULD NOT FIND REFERENCE PERFORMANCE FOR EXECUTED CONFIG"
    TARGET_PERF=''
else
    PERF_THRESHOLD=$(awk 'BEGIN {print ('${REFERENCE_PERF}' * '${PERF_TOLERANCE}')}')
    TARGET_PERF='--target-perf '${PERF_THRESHOLD}
fi

cd $REPO_DIR

python3 translate.py \
    --input ${DATASET_DIR}/newstest2014.en \
    --reference ${DATASET_DIR}/newstest2014.de \
    --output /tmp/output \
    --model gnmt/model_best.pth \
    --batch-size ${BATCH_SIZE} \
    --beam-size ${BEAM_SIZE} \
    --math ${MATH} \
    --warmup 1 \
    --target-bleu 24.3 \
    ${TARGET_PERF}
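For reference, a minimal Python sketch (an illustration, not part of the original test) of the lookup the grep/cut/awk pipeline above performs; it assumes the reference file is comma-separated with math mode, batch size, beam size, and GPU name in the first four fields and throughput in the fifth, as implied by the `cut -f 5 -d ','` call.

import csv

def find_target_perf(reference_file, math, batch_size, beam_size, gpu_name, tolerance=0.95):
    # Mirror of the PERF_THRESHOLD computation: reference value scaled by the tolerance.
    key = [math, str(batch_size), str(beam_size), gpu_name]
    with open(reference_file) as fd:
        for row in csv.reader(fd):
            if [c.strip() for c in row[:4]] == key and len(row) >= 5:
                return float(row[4]) * tolerance
    return None  # corresponds to the empty TARGET_PERF branch above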
PyTorch/Classification/GPUNet/triton
triton
export_model
#!/usr/bin/env python3 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import os from pathlib import Path os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1" # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file DATALOADER_FN_NAME, BaseLoader, BaseSaver, ExportFormat, ModelInputType, TorchJit, load_from_file, ) from .deployment_toolkit.extensions import loaders, savers # noqa: E402 module level import not at top of file LOGGER = logging.getLogger("export_model") INPUT_MODEL_TYPES = [ ModelInputType.TF_ESTIMATOR, ModelInputType.TF_KERAS, ModelInputType.PYT, ] OUTPUT_MODEL_TYPES = [ ExportFormat.TF_SAVEDMODEL, ExportFormat.TORCHSCRIPT, ExportFormat.ONNX, ] TORCH_JIT_TYPES = [ TorchJit.NONE, TorchJit.TRACE, TorchJit.SCRIPT, ] def _get_args(): parser = argparse.ArgumentParser( description="Script for exporting models from supported frameworks.", allow_abbrev=False ) parser.add_argument("--input-path", help="Path to input python module", required=True) parser.add_argument( "--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True ) parser.add_argument("--output-path", help="Path to output model file", required=True) parser.add_argument( "--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True ) parser.add_argument( "--torch-jit", help="Torch Jit", choices=[f.value for f in TORCH_JIT_TYPES], required=False, default=None, ) parser.add_argument("--dataloader", help="Path to python module containing data loader") parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) parser.add_argument( "--ignore-unknown-parameters", help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)", action="store_true", default=False, ) args, unparsed_args = parser.parse_known_args() Loader: BaseLoader = loaders.get(args.input_type) ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser) if args.input_type == ModelInputType.PYT.value and args.output_type == ExportFormat.ONNX.value: saver_type = f"{ModelInputType.PYT.value}--{ExportFormat.ONNX.value}" else: saver_type = args.output_type Saver: BaseSaver = savers.get(saver_type) ArgParserGenerator(Saver).update_argparser(parser) if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) if args.ignore_unknown_parameters: args, unknown_args = parser.parse_known_args() LOGGER.warning(f"Got additional args {unknown_args}") else: args = 
parser.parse_args() return args def main(): args = _get_args() log_level = logging.INFO if not args.verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) LOGGER.info("args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") dataloader_fn = None if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) Loader: BaseLoader = loaders.get(args.input_type) loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args) print(args.input_path) print(os.path.isfile(args.input_path)) print(args.output_type) model = loader.load( args.input_path, dataloader_fn=dataloader_fn, output_type=args.output_type, torch_jit=args.torch_jit, ) LOGGER.info("inputs: %s", model.inputs) LOGGER.info("outputs: %s", model.outputs) if args.input_type == ModelInputType.PYT.value and args.output_type == ExportFormat.ONNX.value: saver_type = f"{ModelInputType.PYT.value}--{ExportFormat.ONNX.value}" else: saver_type = args.output_type Saver: BaseSaver = savers.get(saver_type) saver = ArgParserGenerator(Saver).from_args(args) saver.save(model, args.output_path, dataloader_fn) if __name__ == "__main__": main()
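A hypothetical example of the module passed via --dataloader. The callable name get_dataloader_fn, the tensor names, and the shapes are assumptions made for illustration; the (ids, inputs, outputs) batch layout with numpy values mirrors what the deployment toolkit's loaders iterate over.

import numpy as np

def get_dataloader_fn(data_batch_size: int = 8, num_batches: int = 4):
    # Returns a zero-argument factory; each call yields (ids, x, y) batches of
    # numpy arrays keyed by tensor name. Keyword arguments become CLI flags via
    # ArgParserGenerator.
    def _dataloader():
        for i in range(num_batches):
            ids = np.arange(i * data_batch_size, (i + 1) * data_batch_size)
            x = {"image": np.random.rand(data_batch_size, 3, 224, 224).astype(np.float32)}
            y = {"logits": np.zeros((data_batch_size, 1000), dtype=np.float32)}
            yield ids, x, y

    return _dataloader

The exporter would then be pointed at such a file with --dataloader path/to/module.py.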
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/bermuda
bermuda
pyt
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import typing from collections import Counter from pathlib import Path from typing import Dict, Optional, Union import numpy as np import torch # pytype: disable=import-error import yaml from model_navigator.model import ModelSignatureConfig from model_navigator.tensor import TensorSpec from model_navigator.utils.config import YamlConfigFile from ..core import ( GET_MODEL_FN_NAME, BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, load_from_file, ) from ..extensions import loaders, runners, savers from .utils import get_dynamic_axes, get_shapes_with_dynamic_axes LOGGER = logging.getLogger(__name__) def get_sample_input(dataloader, device): for batch in dataloader: _, x, _ = batch break if isinstance(x, dict): sample_input = list(x.values()) elif isinstance(x, list): sample_input = x else: raise TypeError("The first element (x) of batch returned by dataloader must be a list or a dict") for idx, s in enumerate(sample_input): sample_input[idx] = torch.from_numpy(s).to(device) return tuple(sample_input) def get_model_device(torch_model): if next(torch_model.parameters()).is_cuda: return "cuda" else: return "cpu" def infer_model_precision(model): counter = Counter() for param in model.parameters(): counter[param.dtype] += 1 if counter[torch.float16] > 0: return Precision.FP16 else: return Precision.FP32 def _get_tensor_dtypes(dataloader, precision): def _get_dtypes(t): def _get_dtype(v): dtype = str(v.dtype) if dtype == "float64": dtype = "float32" if precision == Precision.FP16 and dtype == "float32": dtype = "float16" return np.dtype(dtype) return {k: _get_dtype(v) for k, v in t.items()} batch = next(dataloader) _, x, y = batch input_dtypes = _get_dtypes(x) output_dtypes = _get_dtypes(y) return input_dtypes, output_dtypes ### TODO assumption: floating point input ### type has same precision as the model def _get_model_signature( inputs_names: typing.List[str], outputs_names: typing.List[str], precision, dataloader_fn, batch_size_dim: typing.Optional[int] = None, ): dataloader = dataloader_fn() input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision) input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim) inputs = { name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in inputs_names } outputs = { name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name])) for name in outputs_names } return ModelSignatureConfig(inputs, outputs) class PyTorchModelLoader(BaseLoader): required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME def __init__(self, **kwargs): self._model_args = kwargs def load(self, model_path: Union[str, Path], **kwargs) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME) model, io_names_dict = 
get_model(**self._model_args) dataloader_fn = kwargs.get("dataloader_fn", None) output_type = kwargs.get("output_type", None) precision = infer_model_precision(model) batch_axis = getattr(model, "bermuda_batch_axis", 0) # by default models supports batching; batch_axis=0 model_signature = _get_model_signature( inputs_names=io_names_dict["inputs"], outputs_names=io_names_dict["outputs"], precision=precision, dataloader_fn=dataloader_fn, batch_size_dim=batch_axis, ) model = Model(handle=model, precision=precision, inputs=model_signature.inputs, outputs=model_signature.outputs) if output_type == Format.TS_TRACE.value: return self._trace(model, dataloader_fn) elif output_type == Format.TS_SCRIPT.value: return self._script(model) elif output_type == Format.ONNX.value: return model else: raise ValueError(f"Not supported PyTorch format: {output_type}") def _trace(self, model: Model, dataloader_fn) -> Model: device = get_model_device(model.handle) dummy_input = get_sample_input(dataloader_fn(), device) traced_model = torch.jit.trace_module(model.handle, {"forward": dummy_input}) return Model(traced_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs) def _script(self, model: Model) -> Model: scripted_model = torch.jit.script(model.handle) return Model(scripted_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs) class TorchScriptLoader(BaseLoader): def __init__(self, tensor_names_path: str = None, **kwargs): self._model_args = kwargs self._io_spec = None if tensor_names_path is not None: with Path(tensor_names_path).open("r") as fh: tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader) self._io_spec = ModelSignatureConfig(tensor_infos["inputs"], tensor_infos["outputs"]) def load(self, model_path: Union[str, Path], **_) -> Model: if not isinstance(model_path, Path): model_path = Path(model_path) model = torch.jit.load(model_path.as_posix()) precision = infer_model_precision(model) io_spec = self._io_spec if not io_spec: yaml_path = model_path.parent / f"{model_path.name}.yaml" if not yaml_path.is_file(): raise ValueError( f"If `--tensor-names-path is not provided, " f"TorchScript model loader expects file {yaml_path} with tensor information." ) with yaml_path.open("r") as fh: tensor_info = yaml.load(fh, Loader=yaml.SafeLoader) io_spec = ModelSignatureConfig(tensor_info["inputs"], tensor_info["outputs"]) return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class PYT2ONNXSaver(BaseSaver): def __init__(self, onnx_opset: int = None): self._onnx_opset = onnx_opset def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance( model.handle, torch.nn.Module ), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted." 
dynamic_axes = get_dynamic_axes(dataloader_fn(), batch_size_dim=0) device = get_model_device(model.handle) dummy_input = get_sample_input(dataloader_fn(), device) with torch.no_grad(): torch.onnx.export( model.handle, dummy_input, model_path, do_constant_folding=True, input_names=list(model.inputs), output_names=list(model.outputs), dynamic_axes=dynamic_axes, opset_version=self._onnx_opset, ) class TorchScriptSaver(BaseSaver): def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None: if not isinstance(model_path, Path): model_path = Path(model_path) if isinstance(model.handle, torch.jit.ScriptModule): torch.jit.save(model.handle, model_path.as_posix()) else: raise RuntimeError("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.") signature_config = ModelSignatureConfig(inputs=model.inputs, outputs=model.outputs) annotation_path = model_path.parent / f"{model_path.name}.yaml" with YamlConfigFile(annotation_path) as config_file: config_file.save_config(signature_config) class PyTorchRunner(BaseRunner): def __init__(self): pass def init_inference(self, model: Model): return PyTorchRunnerSession(model=model) class PyTorchRunnerSession(BaseRunnerSession): def __init__(self, model: Model): super().__init__(model) assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance( model.handle, torch.nn.Module ), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted." self._model = model self._output_names = None def __enter__(self): self._output_names = list(self._model.outputs) return self def __exit__(self, exc_type, exc_value, traceback): self._output_names = None self._model = None def __call__(self, x: Dict[str, object]): with torch.no_grad(): feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()] y_pred = self._model.handle(*feed_list) if isinstance(y_pred, torch.Tensor): y_pred = (y_pred,) y_pred = [t.cpu().numpy() for t in y_pred] y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.PYT.value, PyTorchModelLoader) loaders.register_extension(Format.TS_TRACE.value, TorchScriptLoader) loaders.register_extension(Format.TS_SCRIPT.value, TorchScriptLoader) savers.register_extension(Format.TS_SCRIPT.value, TorchScriptSaver) savers.register_extension(Format.TS_TRACE.value, TorchScriptSaver) savers.register_extension(f"{Format.PYT.value}--{Format.ONNX.value}", PYT2ONNXSaver) runners.register_extension(Format.PYT.value, PyTorchRunner) runners.register_extension(Format.TS_SCRIPT.value, PyTorchRunner) runners.register_extension(Format.TS_TRACE.value, PyTorchRunner)
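A minimal sketch of driving the runner defined above outside of Triton. It assumes a CUDA device (PyTorchRunnerSession moves inputs with .cuda()), that the triton/ directory is on PYTHONPATH so the deployment_toolkit package resolves, and it uses a toy linear network with illustrative tensor names.

import numpy as np
import torch

from deployment_toolkit.bermuda.pyt import PyTorchRunner  # import path is an assumption
from deployment_toolkit.core import Model, Precision

net = torch.nn.Linear(16, 4).cuda().eval()
# Only the output names are consulted by the runner session; TensorSpec values are omitted here.
model = Model(handle=net, precision=Precision.FP32,
              inputs={"input__0": None}, outputs={"output__0": None})

runner = PyTorchRunner()
with runner.init_inference(model=model) as session:
    batch = {"input__0": np.random.rand(2, 16).astype(np.float32)}
    result = session(batch)  # {"output__0": ndarray of shape (2, 4)}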
TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading
dataloading
feature_spec
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import yaml import os from typing import Dict from typing import List import numpy as np from .defaults import CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, \ TRAIN_MAPPING, TEST_MAPPING, \ CARDINALITY_SELECTOR, DTYPE_SELECTOR, \ SPLIT_BINARY """ For performance reasons, numerical features are required to appear in the same order in both source_spec and channel_spec. For more detailed requirements, see the check_feature_spec method""" TYPE_SELECTOR = "type" FEATURES_SELECTOR = "features" FILES_SELECTOR = "files" class FeatureSpec: """ This class contains the metadata necessary to find, interpret, load and dataset and supply it to the model. feature_spec section contains the definitions and per-feature metadata of features used in the model source_spec contains the specifics of how the feature data is sourced. It is a dict of configurations, each providing an instance of the dataset, for example a train or test part channel_spec the configuration of which features are used by which channels of the model metadata is an optional dictionary of additional, dataset-wide metadata base_directory is the path relative to which all paths contained in FeatureSpec are interpreted """ def __init__(self, feature_spec=None, source_spec=None, channel_spec=None, metadata=None, base_directory=None): self.feature_spec: Dict = feature_spec if feature_spec is not None else {} self.source_spec: Dict = source_spec if source_spec is not None else {} self.channel_spec: Dict = channel_spec if channel_spec is not None else {} self.metadata: Dict = metadata if metadata is not None else {} self.base_directory: str = base_directory @classmethod def from_yaml(cls, path): with open(path, 'r') as feature_spec_file: base_directory = os.path.dirname(path) feature_spec = yaml.safe_load(feature_spec_file) return cls.from_dict(feature_spec, base_directory=base_directory) @classmethod def from_dict(cls, source_dict, base_directory): return cls(base_directory=base_directory, **source_dict) def to_dict(self) -> Dict: attributes_to_dump = ['feature_spec', 'source_spec', 'channel_spec', 'metadata'] return {attr: self.__dict__[attr] for attr in attributes_to_dump} def to_string(self): return yaml.dump(self.to_dict()) def to_yaml(self, output_path=None): if not output_path: output_path = self.base_directory + '/feature_spec.yaml' with open(output_path, 'w') as output_file: print(yaml.dump(self.to_dict()), file=output_file) def get_number_of_numerical_features(self) -> int: numerical_features = self.channel_spec[NUMERICAL_CHANNEL] return len(numerical_features) def cat_positions_to_names(self, positions: List[int]): # Ordering needs to correspond to the one in get_categorical_sizes() feature_names = self.get_categorical_feature_names() return [feature_names[i] for i in positions] def get_categorical_feature_names(self): """ Provides the categorical feature names. 
The returned order should me maintained.""" return self.channel_spec[CATEGORICAL_CHANNEL] def get_categorical_sizes(self) -> List[int]: """For a given feature spec, this function is expected to return the sizes in the order corresponding to the order in the channel_spec section """ categorical_features = self.get_categorical_feature_names() cardinalities = [self.feature_spec[feature_name][CARDINALITY_SELECTOR] for feature_name in categorical_features] return cardinalities # *** Feature Spec checking *** # def _check_feature_spec_general(self): # check that correct dtypes are provided for all features for feature_dict in self.feature_spec.values(): assert DTYPE_SELECTOR in feature_dict try: np.dtype(feature_dict[DTYPE_SELECTOR]) except TypeError: assert False, "Type not understood by numpy" def _check_source_spec_section_model_specific(self): set_of_categorical_features = set(self.channel_spec[CATEGORICAL_CHANNEL]) set_of_numerical_features = set(self.channel_spec[NUMERICAL_CHANNEL]) set_of_label_features = set(self.channel_spec[LABEL_CHANNEL]) numerical_features_list = self.channel_spec[NUMERICAL_CHANNEL] # check that mappings are the ones expected mapping_name_list = list(self.source_spec.keys()) assert sorted(mapping_name_list) == sorted([TEST_MAPPING, TRAIN_MAPPING]) for mapping_name in [TRAIN_MAPPING, TEST_MAPPING]: mapping = self.source_spec[mapping_name] mapping_features = set() for chunk in mapping: # check that chunk has the correct type assert chunk[TYPE_SELECTOR] == SPLIT_BINARY contained_features = chunk[FEATURES_SELECTOR] containing_files = chunk[FILES_SELECTOR] # check that features are unique in mapping for feature in contained_features: assert feature not in mapping_features mapping_features.add(feature) # check that chunk has at least one features assert len(contained_features) >= 1 # check that chunk has exactly file assert len(containing_files) == 1 first_feature = contained_features[0] if first_feature in set_of_categorical_features: # check that each categorical feature is in a different file assert len(contained_features) == 1 # check that the type is one of the supported assert self.feature_spec[first_feature][DTYPE_SELECTOR] in {'int8', 'int16', 'int32'} elif first_feature in set_of_numerical_features: # check that numerical features are all in one chunk assert sorted(contained_features) == sorted(numerical_features_list) # check that ordering is exactly same as in channel spec - required for performance assert contained_features == numerical_features_list # check numerical dtype for feature in contained_features: assert np.dtype(self.feature_spec[feature][DTYPE_SELECTOR]) == np.float16 elif first_feature in set_of_label_features: # check that label feature is in a separate chunk assert len(contained_features) == 1 # check label dtype assert np.dtype(self.feature_spec[first_feature][DTYPE_SELECTOR]) == bool else: assert False, "Feature of unknown type" # check that all features appeared in mapping assert sorted(mapping_features) == sorted(list(self.feature_spec.keys())) def _check_channel_spec_section_model_specific(self): categorical_features_list = self.channel_spec[CATEGORICAL_CHANNEL] numerical_features_list = self.channel_spec[NUMERICAL_CHANNEL] label_features_list = self.channel_spec[LABEL_CHANNEL] set_of_categorical_features = set(categorical_features_list) set_of_numerical_features = set(numerical_features_list) # check that exactly one label feature is selected assert len(label_features_list) == 1 label_feature_name = label_features_list[0] # check that 
channels are the ones expected channel_name_list = list(self.channel_spec.keys()) assert sorted(channel_name_list) == sorted([CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL]) # check that all features used in channel spec are exactly ones defined in feature_spec feature_spec_features = list(self.feature_spec.keys()) channel_spec_features = list(set.union(set_of_categorical_features, set_of_numerical_features, {label_feature_name})) assert sorted(feature_spec_features) == sorted(channel_spec_features) # check that lists in channel spec contain unique names assert sorted(list(set_of_categorical_features)) == sorted(categorical_features_list) assert sorted(list(set_of_numerical_features)) == sorted(numerical_features_list) def _check_feature_spec_section_model_specific(self): # check that categorical features have cardinality provided set_of_categorical_features = set(self.channel_spec[CATEGORICAL_CHANNEL]) for feature_name, feature_dict in self.feature_spec.items(): if feature_name in set_of_categorical_features: assert CARDINALITY_SELECTOR in feature_dict assert isinstance(feature_dict[CARDINALITY_SELECTOR], int) def _check_feature_spec_model_specific(self): self._check_channel_spec_section_model_specific() self._check_feature_spec_section_model_specific() self._check_source_spec_section_model_specific() def check_feature_spec(self): self._check_feature_spec_general() self._check_feature_spec_model_specific() # TODO check if cardinality fits in dtype, check if base directory is set @staticmethod def get_default_feature_spec(number_of_numerical_features, categorical_feature_cardinalities): numerical_feature_fstring = "num_{}" categorical_feature_fstring = "cat_{}.bin" label_feature_name = "label" numerical_file_name = "numerical.bin" categorical_file_fstring = "{}" # TODO remove .bin from feature name, add to file name label_file_name = "label.bin" number_of_categorical_features = len(categorical_feature_cardinalities) numerical_feature_names = [numerical_feature_fstring.format(i) for i in range(number_of_numerical_features)] categorical_feature_names = [categorical_feature_fstring.format(i) for i in range(number_of_categorical_features)] cat_feature_types = [get_categorical_feature_type(int(cat_size)) for cat_size in categorical_feature_cardinalities] feature_dict = {f_name: {DTYPE_SELECTOR: str(np.dtype(f_type)), CARDINALITY_SELECTOR: f_size} for f_name, f_type, f_size in zip(categorical_feature_names, cat_feature_types, categorical_feature_cardinalities)} for f_name in numerical_feature_names: feature_dict[f_name] = {DTYPE_SELECTOR: str(np.dtype(np.float16))} feature_dict[label_feature_name] = {DTYPE_SELECTOR: str(np.dtype(bool))} channel_spec = {CATEGORICAL_CHANNEL: categorical_feature_names, NUMERICAL_CHANNEL: numerical_feature_names, LABEL_CHANNEL: [label_feature_name]} source_spec = {} for filename in (TRAIN_MAPPING, TEST_MAPPING): source_spec[filename] = [] dst_folder = filename numerical_file_path = os.path.join(dst_folder, numerical_file_name) source_spec[filename].append({TYPE_SELECTOR: SPLIT_BINARY, FEATURES_SELECTOR: numerical_feature_names, FILES_SELECTOR: [numerical_file_path]}) label_file_path = os.path.join(dst_folder, label_file_name) source_spec[filename].append({TYPE_SELECTOR: SPLIT_BINARY, FEATURES_SELECTOR: [label_feature_name], FILES_SELECTOR: [label_file_path]}) for feature_name in categorical_feature_names: categorical_file_name = categorical_file_fstring.format(feature_name) categorical_file_path = os.path.join(dst_folder, categorical_file_name) 
source_spec[filename].append({TYPE_SELECTOR: SPLIT_BINARY, FEATURES_SELECTOR: [feature_name], FILES_SELECTOR: [categorical_file_path]}) return FeatureSpec(feature_spec=feature_dict, source_spec=source_spec, channel_spec=channel_spec, metadata={}) def get_mapping_paths(self, mapping_name: str): label_feature_name = self.channel_spec[LABEL_CHANNEL][0] set_of_categorical_features = set(self.channel_spec[CATEGORICAL_CHANNEL]) set_of_numerical_features = set(self.channel_spec[NUMERICAL_CHANNEL]) label_path = None numerical_path = None categorical_paths = dict() for chunk in self.source_spec[mapping_name]: local_path = os.path.join(self.base_directory, chunk[FILES_SELECTOR][0]) if chunk[FEATURES_SELECTOR][0] in set_of_numerical_features: numerical_path = local_path elif chunk[FEATURES_SELECTOR][0] in set_of_categorical_features: local_feature = chunk[FEATURES_SELECTOR][0] categorical_paths[local_feature] = local_path elif chunk[FEATURES_SELECTOR][0] == label_feature_name: label_path = local_path return label_path, numerical_path, categorical_paths def get_categorical_feature_type(size: int): """This function works both when max value and cardinality is passed. Consistency by the user is required""" types = (np.int8, np.int16, np.int32) for numpy_type in types: if size < np.iinfo(numpy_type).max: return numpy_type raise RuntimeError(f"Categorical feature of size {size} is too big for defined types")
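A short usage sketch for FeatureSpec based on the methods above; the cardinalities and the /data path are illustrative, and the imports assume the repository root is on PYTHONPATH so that the dataloading package resolves.

from dataloading.defaults import TRAIN_MAPPING
from dataloading.feature_spec import FeatureSpec

spec = FeatureSpec.get_default_feature_spec(
    number_of_numerical_features=2,
    categorical_feature_cardinalities=[1000, 50],
)
spec.base_directory = "/data/my_dataset"   # must be set before resolving paths or writing the default yaml
spec.check_feature_spec()                  # runs the assertions defined above
print(spec.get_categorical_sizes())        # [1000, 50]
spec.to_yaml()                             # writes /data/my_dataset/feature_spec.yaml

reloaded = FeatureSpec.from_yaml("/data/my_dataset/feature_spec.yaml")
label_path, numerical_path, categorical_paths = reloaded.get_mapping_paths(TRAIN_MAPPING)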
TensorFlow/Segmentation/UNet_Industrial/scripts
scripts
UNet_AMP_8GPU
#!/usr/bin/env bash

# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script launches UNet training in FP32-AMP on 8 GPUs using 16 batch size (2 per GPU)
# Usage ./UNet_AMP_8GPU_XLA.sh <path to result repository> <path to dataset> <dagm classID (1-10)>

BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

export TF_CPP_MIN_LOG_LEVEL=3

mpirun \
    -np 8 \
    -H localhost:8 \
    -bind-to none \
    -map-by slot \
    -x NCCL_DEBUG=VERSION \
    -x LD_LIBRARY_PATH \
    -x PATH \
    -mca pml ob1 -mca btl ^openib \
    --allow-run-as-root \
    python "${BASEDIR}/../main.py" \
    --unet_variant='tinyUNet' \
    --activation_fn='relu' \
    --exec_mode='train_and_evaluate' \
    --iter_unit='batch' \
    --num_iter=2500 \
    --batch_size=2 \
    --warmup_step=10 \
    --results_dir="${1}" \
    --data_dir="${2}" \
    --dataset_name='DAGM2007' \
    --dataset_classID="${3}" \
    --data_format='NCHW' \
    --use_auto_loss_scaling \
    --amp \
    --xla \
    --learning_rate=1e-4 \
    --learning_rate_decay_factor=0.8 \
    --learning_rate_decay_steps=500 \
    --rmsprop_decay=0.9 \
    --rmsprop_momentum=0.8 \
    --loss_fn_name='adaptive_loss' \
    --weight_decay=1e-5 \
    --weight_init_method='he_uniform' \
    --augment_data \
    --display_every=250 \
    --debug_verbosity=0
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/tasks
tasks
__init__
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# flake8: noqa
from .ec import train_ec
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers
layers
attention
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras-based attention layer.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import math import tensorflow as tf from official.nlp.modeling.layers import dense_einsum from official.nlp.modeling.layers import masked_softmax # @tf.keras.utils.register_keras_serializable(package="Text") class Attention(tf.keras.layers.Layer): """Attention layer. This is an implementation of multi-headed attention based on "Attention is all you Need". If `from_tensor` and `to_tensor` are the same, then this is self-attention. Each timestep in `from_tensor` attends to the corresponding sequence in `to_tensor`, and returns a fixed-width vector. This function first projects `from_tensor` into a "query" tensor and `to_tensor` into "key" and "value" tensors. These are (effectively) a list of tensors of length `num_attention_heads`, where each tensor is of shape [batch_size, seq_length, size_per_head]. Then, the query and key tensors are dot-producted and scaled. These are softmaxed to obtain attention probabilities. The value tensors are then interpolated by these probabilities, then concatenated back to a single tensor and returned. Attributes: num_heads: Number of attention heads. head_size: Size of each attention head. dropout: Dropout probability. kernel_initializer: Initializer for dense layer kernels. bias_initializer: Initializer for dense layer biases. kernel_regularizer: Regularizer for dense layer kernels. bias_regularizer: Regularizer for dense layer biases. activity_regularizer: Regularizer for dense layer activity. kernel_constraint: Constraint for dense layer kernels. bias_constraint: Constraint for dense layer kernels. 
""" def __init__(self, num_heads, head_size, dropout_rate=0.0, kernel_initializer="glorot_uniform", bias_initializer="zeros", kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): super(Attention, self).__init__(**kwargs) self._num_heads = num_heads self._head_size = head_size self._dropout_rate = dropout_rate self._kernel_initializer = tf.keras.initializers.get(kernel_initializer) self._bias_initializer = tf.keras.initializers.get(bias_initializer) self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer) self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer) self._kernel_constraint = tf.keras.constraints.get(kernel_constraint) self._bias_constraint = tf.keras.constraints.get(bias_constraint) self._query_dense = dense_einsum.DenseEinsum( output_shape=(self._num_heads, self._head_size), kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="query") self._key_dense = dense_einsum.DenseEinsum( output_shape=(self._num_heads, self._head_size), kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="key") self._value_dense = dense_einsum.DenseEinsum( output_shape=(self._num_heads, self._head_size), kernel_initializer=self._kernel_initializer, bias_initializer=self._bias_initializer, kernel_regularizer=self._kernel_regularizer, bias_regularizer=self._bias_regularizer, activity_regularizer=self._activity_regularizer, kernel_constraint=self._kernel_constraint, bias_constraint=self._bias_constraint, name="value") self._masked_softmax = masked_softmax.MaskedSoftmax(mask_expansion_axes=[1]) self._dropout = tf.keras.layers.Dropout(rate=self._dropout_rate) def get_config(self): config = { "num_heads": self._num_heads, "head_size": self._head_size, "dropout_rate": self._dropout_rate, "kernel_initializer": tf.keras.initializers.serialize(self._kernel_initializer), "bias_initializer": tf.keras.initializers.serialize(self._bias_initializer), "kernel_regularizer": tf.keras.regularizers.serialize(self._kernel_regularizer), "bias_regularizer": tf.keras.regularizers.serialize(self._bias_regularizer), "activity_regularizer": tf.keras.regularizers.serialize(self._activity_regularizer), "kernel_constraint": tf.keras.constraints.serialize(self._kernel_constraint), "bias_constraint": tf.keras.constraints.serialize(self._bias_constraint) } base_config = super(Attention, self).get_config() return dict(list(base_config.items()) + list(config.items())) def call(self, inputs): from_tensor = inputs[0] to_tensor = inputs[1] attention_mask = inputs[2] if len(inputs) == 3 else None # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` # `query_tensor` = [B, F, N ,H] query_tensor = self._query_dense(from_tensor) # `key_tensor` = [B, T, N, H] key_tensor = self._key_dense(to_tensor) # `value_tensor` = [B, T, N, H] value_tensor = self._value_dense(to_tensor) # Take the dot product 
between "query" and "key" to get the raw # attention scores. #attention_scores = tf.einsum("BTNH,BFNH->BNFT", key_tensor, query_tensor) # Instead of using the einsum equation, we expand it into the below # equivalent equations. # `query_tensor` = [B, N, F, H] query_tensor = tf.transpose(query_tensor, [0, 2, 1, 3]) # `key_tensor` = [B, N, T, H] key_tensor = tf.transpose(key_tensor, [0, 2, 1, 3]) # `attention_scores` = [B, N, F, T] attention_scores = tf.matmul(query_tensor, key_tensor, transpose_b=True) attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(self._head_size))) # Normalize the attention scores to probabilities. # `attention_probs` = [B, N, F, T] attention_probs = self._masked_softmax([attention_scores, attention_mask]) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self._dropout(attention_probs) # `context_layer` = [B, F, N, H] return tf.einsum("BNFT,BTNH->BFNH", attention_probs, value_tensor) # @tf.keras.utils.register_keras_serializable(package="Text") class CachedAttention(Attention): """Attention layer with cache used for auto-agressive decoding. Attributes: num_heads: Number of attention heads. head_size: Size of each attention head. **kwargs: Other keyword arguments inherit from `Attention` class. """ def __init__(self, num_heads, head_size, **kwargs): super(CachedAttention, self).__init__(num_heads, head_size, **kwargs) def _update_cache(self, key_tensor, value_tensor, cache, decode_loop_step): """Updates cache states and gets full-length key/value tensors.""" # Combines cached keys and values with new keys and values. if decode_loop_step is not None: # TPU special case. key_seq_dim = cache["key"].shape.as_list()[1] indices = tf.reshape( tf.one_hot(decode_loop_step, key_seq_dim, dtype=key_tensor.dtype), [1, key_seq_dim, 1, 1]) key_tensor = cache["key"] + key_tensor * indices value_seq_dim = cache["value"].shape.as_list()[1] indices = tf.reshape( tf.one_hot(decode_loop_step, value_seq_dim, dtype=value_tensor.dtype), [1, value_seq_dim, 1, 1]) value_tensor = cache["value"] + value_tensor * indices else: key_tensor = tf.concat( [tf.cast(cache["key"], key_tensor.dtype), key_tensor], axis=1) value_tensor = tf.concat( [tf.cast(cache["value"], value_tensor.dtype), value_tensor], axis=1) # Update cache cache["key"] = key_tensor cache["value"] = value_tensor return key_tensor, value_tensor def call(self, inputs, decode_loop_step=None): from_tensor = inputs[0] to_tensor = inputs[1] attention_mask = inputs[2] if len(inputs) >= 3 else None cache = inputs[3] if len(inputs) >= 4 else None # Scalar dimensions referenced here: # B = batch size (number of sequences) # F = `from_tensor` sequence length # T = `to_tensor` sequence length # N = `num_attention_heads` # H = `size_per_head` # `query_tensor` = [B, F, N ,H] query_tensor = self._query_dense(from_tensor) # `key_tensor` = [B, T, N, H] key_tensor = self._key_dense(to_tensor) # `value_tensor` = [B, T, N, H] value_tensor = self._value_dense(to_tensor) if cache: key_tensor, value_tensor = self._update_cache(key_tensor, value_tensor, cache, decode_loop_step) # Take the dot product between "query" and "key" to get the raw # attention scores. attention_scores = tf.einsum("BTNH,BFNH->BNFT", key_tensor, query_tensor) attention_scores = tf.multiply(attention_scores, 1.0 / math.sqrt(float(self._head_size))) # Normalize the attention scores to probabilities. 
# `attention_probs` = [B, N, F, T] attention_probs = self._masked_softmax([attention_scores, attention_mask]) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self._dropout(attention_probs) # `context_layer` = [B, F, N, H] return tf.einsum("BNFT,BTNH->BFNH", attention_probs, value_tensor), cache
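A self-attention usage sketch for the layer above; shapes are illustrative and the import assumes the BERT repository's official package is on PYTHONPATH. The output shape [batch, from_seq_len, num_heads, head_size] follows the final einsum in call().

import tensorflow as tf

from official.nlp.modeling.layers.attention import Attention

layer = Attention(num_heads=4, head_size=16, dropout_rate=0.1)

from_tensor = tf.random.uniform((2, 10, 64))   # [batch, seq_len, hidden]
attention_mask = tf.ones((2, 10, 10))          # [batch, from_len, to_len]; 1 marks attendable positions

context = layer([from_tensor, from_tensor, attention_mask])  # self-attention
print(context.shape)                                         # (2, 10, 4, 16)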
PyTorch/LanguageModeling/BERT/triton/dist4l/scripts
scripts
setup_environment
#!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

WORKDIR="${WORKDIR:=$(pwd)}"
export DATASETS_DIR=${WORKDIR}/datasets
export WORKSPACE_DIR=${WORKDIR}/runner_workspace
export CHECKPOINTS_DIR=${WORKSPACE_DIR}/checkpoints
export MODEL_REPOSITORY_PATH=${WORKSPACE_DIR}/model_store
export SHARED_DIR=${WORKSPACE_DIR}/shared_dir

echo "Preparing directories"
mkdir -p ${WORKSPACE_DIR}
mkdir -p ${DATASETS_DIR}
mkdir -p ${CHECKPOINTS_DIR}
mkdir -p ${MODEL_REPOSITORY_PATH}
mkdir -p ${SHARED_DIR}

echo "Setting up environment"
export MODEL_NAME=BERT
export ENSEMBLE_MODEL_NAME=
export TRITON_LOAD_MODEL_METHOD=explicit
export TRITON_INSTANCES=1
PyTorch/Translation/Transformer/fairseq/data
data
dictionary
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from collections import Counter import os import torch class Dictionary(object): """A mapping from symbols to consecutive integers""" def __init__(self, pad='<pad>', eos='</s>', unk='<unk>'): self.unk_word, self.pad_word, self.eos_word = unk, pad, eos self.symbols = [] self.count = [] self.indices = {} # dictionary indexing starts at 1 for consistency with Lua self.add_symbol('<Lua heritage>') self.pad_index = self.add_symbol(pad) self.eos_index = self.add_symbol(eos) self.unk_index = self.add_symbol(unk) self.nspecial = len(self.symbols) def __eq__(self, other): return self.indices == other.indices def __getitem__(self, idx): if idx < len(self.symbols): return self.symbols[idx] return self.unk_word def __len__(self): """Returns the number of symbols in the dictionary""" return len(self.symbols) def index(self, sym): """Returns the index of the specified symbol""" if sym in self.indices: return self.indices[sym] return self.unk_index def string(self, tensor, bpe_symbol=None, escape_unk=False): """Helper for converting a tensor of token indices to a string. Can optionally remove BPE symbols or escape <unk> words. """ if torch.is_tensor(tensor) and tensor.dim() == 2: return '\n'.join(self.string(t) for t in tensor) def token_string(i): if i == self.unk(): return self.unk_string(escape_unk) else: return self[i] sent = ' '.join(token_string(i) for i in tensor if i != self.eos()) if bpe_symbol is not None: sent = (sent + ' ').replace(bpe_symbol, '').rstrip() return sent def unk_string(self, escape=False): """Return unknown string, optionally escaped as: <<unk>>""" if escape: return '<{}>'.format(self.unk_word) else: return self.unk_word def add_symbol(self, word, n=1): """Adds a word to the dictionary""" if word in self.indices: idx = self.indices[word] self.count[idx] = self.count[idx] + n return idx else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(n) return idx def update(self, new_dict): """Updates counts from new dictionary.""" for word in new_dict.symbols: idx2 = new_dict.indices[word] if word in self.indices: idx = self.indices[word] self.count[idx] = self.count[idx] + new_dict.count[idx2] else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(new_dict.count[idx2]) def finalize(self, threshold=-1, nwords=-1, padding_factor=8): """Sort symbols by frequency in descending order, ignoring special ones. Args: - threshold defines the minimum word count - nwords defines the total number of words in the final dictionary, including special symbols - padding_factor can be used to pad the dictionary size to be a multiple of 8, which is important on some hardware (e.g., Nvidia Tensor Cores). 
""" if nwords <= 0: nwords = len(self) new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial))) new_symbols = self.symbols[:self.nspecial] new_count = self.count[:self.nspecial] c = Counter(dict(zip(self.symbols[self.nspecial:], self.count[self.nspecial:]))) for symbol, count in c.most_common(nwords - self.nspecial): if count >= threshold: new_indices[symbol] = len(new_symbols) new_symbols.append(symbol) new_count.append(count) else: break threshold_nwords = len(new_symbols) if padding_factor > 1: i = 0 while threshold_nwords % padding_factor != 0: symbol = 'madeupword{:04d}'.format(i) new_indices[symbol] = len(new_symbols) new_symbols.append(symbol) new_count.append(0) i += 1 threshold_nwords += 1 assert len(new_symbols) % padding_factor == 0 assert len(new_symbols) == len(new_indices) self.count = list(new_count) self.symbols = list(new_symbols) self.indices = new_indices def pad(self): """Helper to get index of pad symbol""" return self.pad_index def eos(self): """Helper to get index of end-of-sentence symbol""" return self.eos_index def unk(self): """Helper to get index of unk symbol""" return self.unk_index @classmethod def loads(cls, s): lines = s.strip().split('\n') d = cls() for line in lines: idx = line.rfind(' ') word = line[:idx] count = int(line[idx + 1:]) d.indices[word] = len(d.symbols) d.symbols.append(word) d.count.append(count) return d @classmethod def load(cls, f, ignore_utf_errors=False): """Loads the dictionary from a text file with the format: ``` <symbol0> <count0> <symbol1> <count1> ... ``` """ if isinstance(f, str): try: if not ignore_utf_errors: with open(f, 'r', encoding='utf-8') as fd: return cls.load(fd) else: with open(f, 'r', encoding='utf-8', errors='ignore') as fd: return cls.load(fd) except FileNotFoundError as fnfe: raise fnfe except Exception: raise Exception("Incorrect encoding detected in {}, please " "rebuild the dataset".format(f)) cont = f.read() d = cls.loads(cont) return d def save(self, f): """Stores dictionary into a text file""" if isinstance(f, str): os.makedirs(os.path.dirname(f), exist_ok=True) with open(f, 'w', encoding='utf-8') as fd: return self.save(fd) d = self.saves() f.write(d) def saves(self): rv = '' for symbol, count in zip(self.symbols[self.nspecial:], self.count[self.nspecial:]): rv += '{} {}\n'.format(symbol, count) return rv def dummy_sentence(self, length): t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long() t[-1] = self.eos() return t def get_metadata(self): return {'len': self.__len__(), 'pad': self.pad_index, 'eos': self.eos_index, 'unk': self.unk_index, 'nspecial': self.nspecial }
PyTorch/Forecasting/TFT/triton/runner
runner
exceptions
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class RunnerException(Exception):
    """
    Runner Exception
    """

    def __init__(self, message: str):
        self._message = message

    def __str__(self):
        return self._message

    @property
    def message(self):
        """Get the exception message.

        Returns
        -------
        str
            The message associated with this exception, or None if no message.
        """
        return self._message
CUDA-Optimized/FastSpeech/fastspeech/hparams
hparams
trt
parent_yaml: 'infer.yaml'

# Inference
batch_size: 1   # Batch size.
use_trt: True   # Usage of TensorRT. Must be True to enable TensorRT.
use_fp16: True  # Usage of FP16. Set to True to enable half precision for the engine.

# TRT
trt_file_path: "/workspace/fastspeech/fastspeech.fp16.b1.trt"  # Built TensorRT engine file path.
trt_max_input_seq_len: 128    # Max input sequence length.
trt_max_output_seq_len: 1024  # Max output sequence length.
trt_max_ws_size: 8            # Max workspace size in GB available for TensorRT engine build.
trt_multi_engine: False       # Usage of multi-engines.
trt_force_build: False        # Force build mode. If True, an engine is forcibly built and overwritten to trt_file_path.

# WaveGlow Engine
waveglow_engine_path: "/workspace/fastspeech/waveglow.fp16.trt"
PyTorch/Recommendation/NCF
NCF
neumf
# Copyright (c) 2018, deepakn94, robieta. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ----------------------------------------------------------------------- # # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch import torch.nn as nn import sys from os.path import abspath, join, dirname class NeuMF(nn.Module): def __init__(self, nb_users, nb_items, mf_dim, mlp_layer_sizes, dropout=0): if mlp_layer_sizes[0] % 2 != 0: raise RuntimeError('u dummy, mlp_layer_sizes[0] % 2 != 0') super(NeuMF, self).__init__() nb_mlp_layers = len(mlp_layer_sizes) self.mf_user_embed = nn.Embedding(nb_users, mf_dim) self.mf_item_embed = nn.Embedding(nb_items, mf_dim) self.mlp_user_embed = nn.Embedding(nb_users, mlp_layer_sizes[0] // 2) self.mlp_item_embed = nn.Embedding(nb_items, mlp_layer_sizes[0] // 2) self.dropout = dropout self.mlp = nn.ModuleList() for i in range(1, nb_mlp_layers): self.mlp.extend([nn.Linear(mlp_layer_sizes[i - 1], mlp_layer_sizes[i])]) # noqa: E501 self.final = nn.Linear(mlp_layer_sizes[-1] + mf_dim, 1) self.mf_user_embed.weight.data.normal_(0., 0.01) self.mf_item_embed.weight.data.normal_(0., 0.01) self.mlp_user_embed.weight.data.normal_(0., 0.01) self.mlp_item_embed.weight.data.normal_(0., 0.01) def glorot_uniform(layer): fan_in, fan_out = layer.in_features, layer.out_features limit = np.sqrt(6. / (fan_in + fan_out)) layer.weight.data.uniform_(-limit, limit) def lecunn_uniform(layer): fan_in, fan_out = layer.in_features, layer.out_features # noqa: F841, E501 limit = np.sqrt(3. / fan_in) layer.weight.data.uniform_(-limit, limit) for layer in self.mlp: if type(layer) != nn.Linear: continue glorot_uniform(layer) lecunn_uniform(self.final) def forward(self, user, item, sigmoid=False): xmfu = self.mf_user_embed(user) xmfi = self.mf_item_embed(item) xmf = xmfu * xmfi xmlpu = self.mlp_user_embed(user) xmlpi = self.mlp_item_embed(item) xmlp = torch.cat((xmlpu, xmlpi), dim=1) for i, layer in enumerate(self.mlp): xmlp = layer(xmlp) xmlp = nn.functional.relu(xmlp) if self.dropout != 0: xmlp = nn.functional.dropout(xmlp, p=self.dropout, training=self.training) x = torch.cat((xmf, xmlp), dim=1) x = self.final(x) if sigmoid: x = torch.sigmoid(x) return x
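A forward-pass sketch for NeuMF above; layer sizes and id tensors are illustrative (mlp_layer_sizes[0] must be even, as the constructor enforces), and the import assumes the NCF directory is on PYTHONPATH.

import torch

from neumf import NeuMF

model = NeuMF(nb_users=100, nb_items=200, mf_dim=8,
              mlp_layer_sizes=[32, 16, 8], dropout=0.1)
model.eval()

users = torch.tensor([0, 1, 2])
items = torch.tensor([10, 20, 30])
with torch.no_grad():
    scores = model(users, items, sigmoid=True)   # shape [3, 1], values in (0, 1)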
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts
scripts
export_symbols
#!/usr/bin/env python3
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

import sys

if len(sys.argv) != 3:
    print("Must specify path to PyTorch Tacotron2 containing 'text' module to load and text file to write")
    sys.exit(1)

modulePath = sys.argv[1]
outputPath = sys.argv[2]

sys.path.append(modulePath)

from text import symbols

i = 0
with open(outputPath, "w") as fout:
    print("# sequence-number symbol", file=fout)
    for s in symbols:
        print("%d %s" % (i, s), file=fout)
        i += 1

print("Successfully wrote %d symbols to '%s'." % (i, outputPath))
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/structures
structures
boxlist_ops
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch from .bounding_box import BoxList from maskrcnn_benchmark.layers import nms as _box_nms from maskrcnn_benchmark import _C def boxlist_nms(boxlist, nms_thresh, max_proposals=-1, score_field="score"): """ Performs non-maximum suppression on a boxlist, with scores specified in a boxlist field via score_field. Arguments: boxlist(BoxList) nms_thresh (float) max_proposals (int): if > 0, then only the top max_proposals are kept after non-maxium suppression score_field (str) """ if nms_thresh <= 0: return boxlist mode = boxlist.mode boxlist = boxlist.convert("xyxy") boxes = boxlist.bbox score = boxlist.get_field(score_field) keep = _box_nms(boxes, score, nms_thresh) if max_proposals > 0: keep = keep[: max_proposals] boxlist = boxlist[keep] return boxlist.convert(mode) def remove_small_boxes(boxlist, min_size): """ Only keep boxes with both sides >= min_size Arguments: boxlist (Boxlist) min_size (int) """ # TODO maybe add an API for querying the ws / hs xywh_boxes = boxlist.convert("xywh").bbox _, _, ws, hs = xywh_boxes.unbind(dim=1) keep = ( (ws >= min_size) & (hs >= min_size) ).nonzero().squeeze(1) return boxlist[keep] # implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py # with slight modifications def boxlist_iou(boxlist1, boxlist2): """Compute the intersection over union of two set of boxes. The box order must be (xmin, ymin, xmax, ymax). Arguments: box1: (BoxList) bounding boxes, sized [N,4]. box2: (BoxList) bounding boxes, sized [M,4]. Returns: (tensor) iou, sized [N,M]. Reference: https://github.com/chainer/chainercv/blob/master/chainercv/utils/bbox/bbox_iou.py """ if boxlist1.size != boxlist2.size: raise RuntimeError( "boxlists should have same image size, got {}, {}".format(boxlist1, boxlist2)) box1, box2 = boxlist1.bbox, boxlist2.bbox if (box1.is_cuda and box2.is_cuda): iou = _C.box_iou(box1,box2) else: N = len(boxlist1) M = len(boxlist2) area1 = boxlist1.area() area2 = boxlist2.area() lt = torch.max(box1[:, None, :2], box2[:, :2]) # [N,M,2] rb = torch.min(box1[:, None, 2:], box2[:, 2:]) # [N,M,2] TO_REMOVE = 1 wh = (rb - lt + TO_REMOVE).clamp(min=0) # [N,M,2] inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] iou = inter / (area1[:, None] + area2 - inter) return iou # TODO redundant, remove def _cat(tensors, dim=0): """ Efficient version of torch.cat that avoids a copy if there is only a single element in a list """ assert isinstance(tensors, (list, tuple)) if len(tensors) == 1: return tensors[0] return torch.cat(tensors, dim) def cat_boxlist(bboxes): """ Concatenates a list of BoxList (having the same image size) into a single BoxList Arguments: bboxes (list[BoxList]) """ assert isinstance(bboxes, (list, tuple)) assert all(isinstance(bbox, BoxList) for bbox in bboxes) size = bboxes[0].size assert all(bbox.size == size for bbox in bboxes) mode = bboxes[0].mode assert all(bbox.mode == mode for bbox in bboxes) fields = set(bboxes[0].fields()) assert all(set(bbox.fields()) == fields for bbox in bboxes) cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size, mode) for field in fields: data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0) cat_boxes.add_field(field, data) return cat_boxes
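An IoU/NMS sketch for the helpers above; it assumes maskrcnn_benchmark is installed (BoxList and the compiled nms op come from it) and uses CPU tensors so the _C.box_iou CUDA path is skipped.

import torch

from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou, boxlist_nms

image_size = (100, 100)
a = BoxList(torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]]), image_size, mode="xyxy")
b = BoxList(torch.tensor([[0., 0., 10., 10.]]), image_size, mode="xyxy")
print(boxlist_iou(a, b))   # [2, 1] IoU matrix

a.add_field("score", torch.tensor([0.9, 0.8]))
kept = boxlist_nms(a, nms_thresh=0.5, score_field="score")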
PyTorch/Translation/Transformer/fairseq/optim
optim
__init__
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import importlib
import os

from .fairseq_optimizer import FairseqOptimizer


OPTIMIZER_REGISTRY = {}
OPTIMIZER_CLASS_NAMES = set()


def build_optimizer(args, params):
    params = filter(lambda p: p.requires_grad, params)
    return OPTIMIZER_REGISTRY[args.optimizer](args, params)


def register_optimizer(name):
    """Decorator to register a new optimizer."""

    def register_optimizer_cls(cls):
        if name in OPTIMIZER_REGISTRY:
            raise ValueError('Cannot register duplicate optimizer ({})'.format(name))
        if not issubclass(cls, FairseqOptimizer):
            raise ValueError('Optimizer ({}: {}) must extend FairseqOptimizer'.format(name, cls.__name__))
        if cls.__name__ in OPTIMIZER_CLASS_NAMES:
            # We use the optimizer class name as a unique identifier in
            # checkpoints, so all optimizers must have unique class names.
            raise ValueError('Cannot register optimizer with duplicate class name ({})'.format(cls.__name__))
        OPTIMIZER_REGISTRY[name] = cls
        OPTIMIZER_CLASS_NAMES.add(cls.__name__)
        return cls

    return register_optimizer_cls


# automatically import any Python files in the optim/ directory
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        module = file[:file.find('.py')]
        importlib.import_module('fairseq.optim.' + module)
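A minimal sketch of how the registry above is intended to be used. The `FairseqOptimizer` base class is not shown in this record, so the wrapped `_optimizer` attribute, the `optimizer_config` property, and the list-valued `args.lr` are assumptions based on common fairseq conventions.

```python
# Sketch of registering a custom optimizer; FairseqOptimizer internals are
# assumed, not taken from this record.
import torch

from fairseq.optim import register_optimizer
from fairseq.optim.fairseq_optimizer import FairseqOptimizer


@register_optimizer('plain_sgd')
class PlainSGD(FairseqOptimizer):
    def __init__(self, args, params):
        super().__init__(args, params)
        # args.lr being a list is an assumption borrowed from fairseq defaults
        self._optimizer = torch.optim.SGD(params, lr=args.lr[0], momentum=args.momentum)

    @property
    def optimizer_config(self):
        return {'lr': self.args.lr[0], 'momentum': self.args.momentum}


# build_optimizer(args, model.parameters()) would now resolve
# args.optimizer == 'plain_sgd' to PlainSGD via OPTIMIZER_REGISTRY.
```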
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit
deployment_toolkit
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
PyTorch/Classification/ConvNets/triton
triton
model
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch def update_argparser(parser): parser.add_argument( "--config", default="resnet50", type=str, required=True, help="Network to deploy") parser.add_argument( "--checkpoint", default=None, type=str, help="The checkpoint of the model. ") parser.add_argument("--classes", type=int, default=1000, help="Number of classes") parser.add_argument("--precision", type=str, default="fp32", choices=["fp32", "fp16"], help="Inference precision") def get_model(**model_args): from image_classification import models model = models.resnet50(pretrained=False) if "checkpoint" in model_args: print(f"loading checkpoint {model_args['checkpoint']}") state_dict = torch.load(model_args["checkpoint"], map_location="cpu") try: model.load_state_dict( { k.replace("module.", ""): v for k, v in state_dict.items() } ) except RuntimeError as RE: if not hasattr(model, "ngc_checkpoint_remap"): raise RE remap_old = model.ngc_checkpoint_remap(version="20.06.0") remap_dist = lambda k: k.replace("module.", "") model.load_state_dict( { remap_old(remap_dist(k)): v for k, v in state_dict.items() } ) if model_args["precision"] == "fp16": model = model.half() model = model.cuda() model.eval() tensor_names = {"inputs": ["INPUT__0"], "outputs": ["OUTPUT__0"]} return model, tensor_names
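Illustrative only: calling `get_model` directly, outside the Triton export flow. It assumes the file is importable as `triton.model` and that a GPU is available, since `get_model` moves the model to CUDA; omitting the `checkpoint` key skips weight loading and keeps randomly initialized weights.

```python
# Hedged usage sketch of get_model from the record above.
import torch

from triton.model import get_model

model, tensor_names = get_model(config="resnet50", classes=1000, precision="fp16")

dummy = torch.randn(1, 3, 224, 224, dtype=torch.half, device="cuda")
with torch.no_grad():
    logits = model(dummy)
print(tensor_names, logits.shape)  # {'inputs': ['INPUT__0'], 'outputs': ['OUTPUT__0']} torch.Size([1, 1000])
```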
PyTorch/Forecasting/TFT/triton
triton
requirements
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. model_navigator[pyt] @ git+https://github.com/triton-inference-server/model_navigator.git@v0.2.7#egg=model_navigator natsort>=7.0.0 networkx==2.5 numpy onnx>=1.8.0,<1.9.0 onnxruntime-gpu==1.8.1 pycuda>=2019.1.2 PyYAML>=5.2 tabulate>=0.8.7 tqdm>=4.44.1 triton-model-analyzer==1.22.0
PyTorch/Detection/Efficientdet/effdet/object_detection
object_detection
argmax_matcher
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Argmax matcher implementation. This class takes a similarity matrix and matches columns to rows based on the maximum value per column. One can specify matched_thresholds and to prevent columns from matching to rows (generally resulting in a negative training example) and unmatched_theshold to ignore the match (generally resulting in neither a positive or negative training example). This matcher is used in Fast(er)-RCNN. Note: matchers are used in TargetAssigners. There is a create_target_assigner factory function for popular implementations. """ import torch from torch.nn.functional import one_hot from .matcher import Match from typing import Optional def one_hot_bool(x, num_classes: int): # for improved perf over PyTorch builtin one_hot, scatter to bool onehot = torch.zeros(x.size(0), num_classes, device=x.device, dtype=torch.bool) return onehot.scatter_(1, x.unsqueeze(1), 1) @torch.jit.script class ArgMaxMatcher(object): # cannot inherit with torchscript """Matcher based on highest value. This class computes matches from a similarity matrix. Each column is matched to a single row. To support object detection target assignment this class enables setting both matched_threshold (upper threshold) and unmatched_threshold (lower thresholds) defining three categories of similarity which define whether examples are positive, negative, or ignored: (1) similarity >= matched_threshold: Highest similarity. Matched/Positive! (2) matched_threshold > similarity >= unmatched_threshold: Medium similarity. Depending on negatives_lower_than_unmatched, this is either Unmatched/Negative OR Ignore. (3) unmatched_threshold > similarity: Lowest similarity. Depending on flag negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore. For ignored matches this class sets the values in the Match object to -2. """ def __init__(self, matched_threshold: float, unmatched_threshold: Optional[float] = None, negatives_lower_than_unmatched: bool = True, force_match_for_each_row: bool = False): """Construct ArgMaxMatcher. Args: matched_threshold: Threshold for positive matches. Positive if sim >= matched_threshold, where sim is the maximum value of the similarity matrix for a given column. Set to None for no threshold. 
unmatched_threshold: Threshold for negative matches. Negative if sim < unmatched_threshold. Defaults to matched_threshold when set to None. negatives_lower_than_unmatched: Boolean which defaults to True. If True then negative matches are the ones below the unmatched_threshold, whereas ignored matches are in between the matched and unmatched threshold. If False, then negative matches are in between the matched and unmatched threshold, and everything lower than unmatched is ignored. force_match_for_each_row: If True, ensures that each row is matched to at least one column (which is not guaranteed otherwise if the matched_threshold is high). Defaults to False. See argmax_matcher_test.testMatcherForceMatch() for an example. Raises: ValueError: if unmatched_threshold is set but matched_threshold is not set or if unmatched_threshold > matched_threshold. """ if (matched_threshold is None) and (unmatched_threshold is not None): raise ValueError('Need to also define matched_threshold when unmatched_threshold is defined') self._matched_threshold = matched_threshold self._unmatched_threshold: float = 0. if unmatched_threshold is None: self._unmatched_threshold = matched_threshold else: if unmatched_threshold > matched_threshold: raise ValueError('unmatched_threshold needs to be smaller or equal to matched_threshold') self._unmatched_threshold = unmatched_threshold if not negatives_lower_than_unmatched: if self._unmatched_threshold == self._matched_threshold: raise ValueError('When negatives are in between matched and unmatched thresholds, these ' 'cannot be of equal value. matched: %s, unmatched: %s', self._matched_threshold, self._unmatched_threshold) self._force_match_for_each_row = force_match_for_each_row self._negatives_lower_than_unmatched = negatives_lower_than_unmatched def _match_when_rows_are_empty(self, similarity_matrix): """Performs matching when the rows of similarity matrix are empty. When the rows are empty, all detections are false positives. So we return a tensor of -1's to indicate that the columns do not match to any rows. Returns: matches: int32 tensor indicating the row each column matches to. """ return -1 * torch.ones(similarity_matrix.shape[1], dtype=torch.long) def _match_when_rows_are_non_empty(self, similarity_matrix): """Performs matching when the rows of similarity matrix are non empty. Returns: matches: int32 tensor indicating the row each column matches to. 
""" # Matches for each column matched_vals, matches = torch.max(similarity_matrix, 0) # Deal with matched and unmatched threshold if self._matched_threshold is not None: # Get logical indices of ignored and unmatched columns as tf.int64 below_unmatched_threshold = self._unmatched_threshold > matched_vals between_thresholds = (matched_vals >= self._unmatched_threshold) & \ (self._matched_threshold > matched_vals) if self._negatives_lower_than_unmatched: matches = self._set_values_using_indicator(matches, below_unmatched_threshold, -1) matches = self._set_values_using_indicator(matches, between_thresholds, -2) else: matches = self._set_values_using_indicator(matches, below_unmatched_threshold, -2) matches = self._set_values_using_indicator(matches, between_thresholds, -1) if self._force_match_for_each_row: force_match_column_ids = torch.argmax(similarity_matrix, 1) force_match_column_indicators = one_hot_bool(force_match_column_ids, similarity_matrix.shape[1]) force_match_column_mask, force_match_row_ids = torch.max(force_match_column_indicators, 0) final_matches = torch.where(force_match_column_mask, force_match_row_ids, matches) return final_matches else: return matches def match(self, similarity_matrix): """Tries to match each column of the similarity matrix to a row. Args: similarity_matrix: tensor of shape [N, M] representing any similarity metric. Returns: Match object with corresponding matches for each of M columns. """ if similarity_matrix.shape[0] == 0: return Match(self._match_when_rows_are_empty(similarity_matrix)) else: return Match(self._match_when_rows_are_non_empty(similarity_matrix)) def _set_values_using_indicator(self, x, indicator, val: int): """Set the indicated fields of x to val. Args: x: tensor. indicator: boolean with same shape as x. val: scalar with value to set. Returns: modified tensor. """ indicator = indicator.to(dtype=x.dtype) return x * (1 - indicator) + val * indicator
PyTorch/Classification/GPUNet/triton/05ms-D
05ms-D
README
# Deploying the GPUNet model on Triton Inference Server This folder contains instructions for deployment to run inference on Triton Inference Server as well as a detailed performance analysis. The purpose of this document is to help you with achieving the best inference performance. ## Table of contents - [Solution overview](#solution-overview) - [Introduction](#introduction) - [Deployment process](#deployment-process) - [Setup](#setup) - [Quick Start Guide](#quick-start-guide) - [Performance](#performance) - [Offline scenario](#offline-scenario) - [Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16) - [Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16) - [Online scenario](#online-scenario) - [Online: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#online-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16) - [Online: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#online-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16) - [Advanced](#advanced) - [Step by step deployment process](#step-by-step-deployment-process) - [Latency explanation](#latency-explanation) - [Release notes](#release-notes) - [Changelog](#changelog) - [Known issues](#known-issues) ## Solution overview ### Introduction The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server) provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or gRPC endpoint, allowing remote clients to request inferencing for any number of GPU or CPU models being managed by the server. This README provides step-by-step deployment instructions for models generated during training (as described in the [model README](../readme.md)). Additionally, this README provides the corresponding deployment scripts that ensure optimal GPU utilization during inferencing on Triton Inference Server. ### Deployment process The deployment process consists of two steps: 1. Conversion. The purpose of conversion is to find the best performing model format supported by Triton Inference Server. Triton Inference Server uses a number of runtime backends such as [TensorRT](https://developer.nvidia.com/tensorrt), [LibTorch](https://github.com/triton-inference-server/pytorch_backend) and [ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend) to support various model types. Refer to the [Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton) for a list of available backends. 2. Configuration. Model configuration on Triton Inference Server, which generates necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md). After deployment Triton inference server is used for evaluation of converted model in two steps: 1. Correctness tests. Produce results which are tested against given correctness thresholds. 2. Performance tests. Produce latency and throughput results for offline (static batching) and online (dynamic batching) scenarios. All steps are executed by provided runner script. 
Refer to [Quick Start Guide](#quick-start-guide) ## Setup Ensure you have the following components: * [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) * [NVIDIA PyTorch NGC container 21.12](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch) * [NVIDIA Triton Inference Server NGC container 21.12](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver) * [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html) * [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU ## Quick Start Guide Running the following scripts will build and launch the container with all required dependencies for native PyTorch as well as Triton Inference Server. This is necessary for running inference and can also be used for data download, processing, and training of the model. 1. Clone the repository. ``` git clone https://github.com/NVIDIA/DeepLearningExamples.git cd PyTorch/Classification/GPUNet ``` 2. Prepare dataset. See the [Quick Start Guide](../../README.md#prepare-the-dataset) 3. Build and run a container that extends NGC PyTorch with the Triton client libraries and necessary dependencies. ``` ./triton/scripts/docker/build.sh ./triton/scripts/docker/interactive.sh /path/to/imagenet/val/ ``` 4. Execute runner script (please mind, the run scripts are prepared per NVIDIA GPU). ``` NVIDIA DGX-1 (1x V100 32GB): ./triton/05ms-D/runner/start_NVIDIA-DGX-1-\(1x-V100-32GB\).sh NVIDIA DGX A100 (1x A100 80GB): ./triton/05ms-D/runner/start_NVIDIA-DGX-A100-\(1x-A100-80GB\).sh ``` ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Offline scenario The offline scenario assumes the client and server are located on the same host. 
The tests uses: - tensors are passed through shared memory between client and server, the Perf Analyzer flag `shared-memory=system` is used - single request is send from client to server with static size of batch #### Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX-1 (1x V100 32GB) | | Backend |ONNX Runtime | | Backend accelerator |NVIDIA TensorRT| | Precision |FP16 | | Model format |ONNX | | Max batch size |64 | | Number of model instances |2| | Export Format | ONNX | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_latency.png"></td> </tr> <tr> <td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/latency_vs_batch.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 957.00 | 0.05 | 0.20 | 0.07 | 0.09 | 0.63 | 0.01 | 0.00 | 1.04 | 1.08 | 1.09 | 1.12 | 1.04 | | 2 | 1 | 1628.00 | 0.05 | 0.21 | 0.07 | 0.14 | 0.75 | 0.01 | 0.00 | 1.22 | 1.26 | 1.27 | 1.29 | 1.22 | | 4 | 1 | 2508.00 | 0.04 | 0.21 | 0.08 | 0.23 | 1.02 | 0.01 | 0.00 | 1.59 | 1.62 | 1.62 | 1.68 | 1.59 | | 8 | 1 | 3712.00 | 0.04 | 0.19 | 0.07 | 0.35 | 1.49 | 0.01 | 0.00 | 2.14 | 2.19 | 2.23 | 2.28 | 2.15 | | 16 | 1 | 4912.00 | 0.04 | 0.22 | 0.08 | 0.57 | 2.33 | 0.01 | 0.00 | 3.25 | 3.28 | 3.29 | 3.31 | 3.25 | | 32 | 1 | 5856.00 | 0.05 | 0.23 | 0.08 | 1.02 | 4.03 | 0.02 | 0.00 | 5.43 | 5.48 | 5.50 | 5.55 | 5.44 | | 64 | 1 | 6656.00 | 0.05 | 0.22 | 0.08 | 1.91 | 7.28 | 0.03 | 0.00 | 9.58 | 9.63 | 9.63 | 9.64 | 9.57 | </details> #### Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Backend |ONNX Runtime | | Backend accelerator |NVIDIA TensorRT| | Precision |FP16 | | Model format |ONNX | | Max batch size |64 | | Number of model instances |2| | Export Format | ONNX | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_latency.png"></td> </tr> <tr> <td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/latency_vs_batch.png"></td> </tr> </tbody> 
</table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 1559.00 | 0.02 | 0.08 | 0.02 | 0.06 | 0.45 | 0.00 | 0.00 | 0.64 | 0.65 | 0.65 | 0.66 | 0.64 | | 2 | 1 | 2796.00 | 0.02 | 0.07 | 0.02 | 0.10 | 0.50 | 0.00 | 0.00 | 0.71 | 0.72 | 0.72 | 0.73 | 0.71 | | 4 | 1 | 4640.00 | 0.02 | 0.07 | 0.02 | 0.15 | 0.60 | 0.00 | 0.00 | 0.86 | 0.89 | 0.89 | 1.03 | 0.86 | | 8 | 1 | 6984.00 | 0.02 | 0.07 | 0.02 | 0.21 | 0.82 | 0.00 | 0.00 | 1.14 | 1.15 | 1.18 | 1.33 | 1.14 | | 16 | 1 | 9136.00 | 0.02 | 0.08 | 0.03 | 0.36 | 1.26 | 0.01 | 0.00 | 1.75 | 1.76 | 1.77 | 1.78 | 1.75 | | 32 | 1 | 9664.00 | 0.02 | 0.10 | 0.03 | 0.93 | 2.21 | 0.01 | 0.00 | 3.30 | 3.33 | 3.37 | 3.48 | 3.30 | | 64 | 1 | 9728.00 | 0.03 | 0.18 | 0.03 | 2.15 | 4.12 | 0.02 | 0.00 | 6.50 | 6.62 | 6.96 | 7.11 | 6.54 | </details> ### Online scenario The online scenario assumes the client and server are located on different hosts. The tests uses: - tensors are passed through HTTP from client to server - concurrent requests are send from client to server, the final batch is created on server side #### Online: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX-1 (1x V100 32GB) | | Backend |ONNX Runtime | | Backend accelerator |NVIDIA TensorRT| | Precision |FP16 | | Model format |ONNX | | Max batch size |64 | | Number of model instances |2| | Export Format | ONNX | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_online_2/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 8 | 984.00 | 0.07 | 0.55 | 5.14 | 0.11 | 2.21 | 0.01 | 0.00 | 8.29 | 9.23 | 9.29 | 9.42 | 8.10 | | 1 | 16 | 1553.00 | 0.08 | 1.23 | 5.87 | 0.29 | 2.76 | 0.01 | 0.00 | 10.84 | 11.38 | 11.96 | 12.93 | 10.24 | | 1 | 24 | 2024.00 | 0.08 | 1.96 | 6.28 | 0.53 | 2.91 | 0.02 | 0.00 | 11.65 | 14.92 | 15.66 | 16.24 | 11.80 | | 1 | 32 | 2559.00 | 0.09 | 1.80 | 6.81 | 0.60 | 3.10 | 0.02 | 0.00 | 12.55 | 13.47 | 13.75 | 15.05 | 12.41 
| | 1 | 40 | 2714.29 | 0.09 | 2.90 | 7.18 | 0.88 | 3.58 | 0.03 | 0.00 | 14.48 | 17.66 | 18.68 | 19.73 | 14.65 | | 1 | 48 | 2841.00 | 0.10 | 3.81 | 7.64 | 1.27 | 3.86 | 0.03 | 0.00 | 16.43 | 21.62 | 22.97 | 23.61 | 16.72 | | 1 | 56 | 3109.00 | 0.11 | 4.22 | 8.16 | 1.45 | 3.90 | 0.04 | 0.00 | 18.17 | 22.14 | 23.56 | 25.39 | 17.87 | | 1 | 64 | 3243.00 | 0.10 | 4.71 | 8.91 | 1.62 | 4.18 | 0.04 | 0.00 | 20.09 | 23.77 | 25.26 | 27.47 | 19.56 | | 1 | 72 | 3576.00 | 0.11 | 3.76 | 9.98 | 1.74 | 4.33 | 0.04 | 0.00 | 19.97 | 23.78 | 24.74 | 26.48 | 19.95 | | 1 | 80 | 3687.00 | 0.11 | 4.50 | 10.20 | 2.03 | 4.67 | 0.05 | 0.00 | 21.69 | 27.08 | 28.44 | 29.97 | 21.55 | | 1 | 88 | 3670.00 | 0.12 | 6.86 | 9.65 | 2.27 | 4.78 | 0.05 | 0.00 | 24.56 | 29.08 | 29.97 | 32.79 | 23.72 | | 1 | 96 | 3811.00 | 0.12 | 6.57 | 10.81 | 2.40 | 4.98 | 0.05 | 0.00 | 25.55 | 30.15 | 31.15 | 33.31 | 24.93 | | 1 | 104 | 3999.00 | 0.13 | 6.21 | 11.44 | 2.77 | 5.20 | 0.06 | 0.00 | 26.24 | 31.58 | 32.66 | 36.68 | 25.80 | | 1 | 112 | 4207.00 | 0.14 | 6.20 | 11.88 | 2.78 | 5.22 | 0.06 | 0.00 | 26.54 | 32.07 | 33.48 | 35.21 | 26.27 | | 1 | 120 | 4105.00 | 0.15 | 7.46 | 12.06 | 3.35 | 5.81 | 0.07 | 0.00 | 29.28 | 37.15 | 39.06 | 40.72 | 28.90 | | 1 | 128 | 4316.00 | 0.16 | 6.62 | 13.26 | 3.23 | 5.83 | 0.08 | 0.00 | 29.48 | 35.96 | 37.67 | 40.26 | 29.17 | | 1 | 136 | 4406.00 | 0.17 | 5.64 | 14.81 | 3.43 | 6.14 | 0.07 | 0.00 | 30.14 | 38.87 | 40.51 | 42.31 | 30.27 | | 1 | 144 | 4339.00 | 0.16 | 8.84 | 13.59 | 3.71 | 6.15 | 0.08 | 0.00 | 33.02 | 40.36 | 43.51 | 46.66 | 32.53 | | 1 | 152 | 4478.00 | 0.19 | 7.40 | 15.32 | 3.97 | 6.44 | 0.09 | 0.00 | 33.97 | 41.65 | 43.14 | 47.27 | 33.42 | | 1 | 160 | 4520.00 | 0.18 | 8.69 | 14.84 | 4.11 | 6.78 | 0.10 | 0.00 | 34.65 | 43.75 | 46.05 | 48.88 | 34.69 | | 1 | 168 | 4487.00 | 0.18 | 8.69 | 15.98 | 4.68 | 6.99 | 0.10 | 0.00 | 37.31 | 47.19 | 49.26 | 53.46 | 36.62 | | 1 | 176 | 4608.39 | 0.18 | 9.66 | 16.28 | 4.42 | 6.82 | 0.10 | 0.00 | 38.30 | 46.18 | 48.55 | 52.57 | 37.47 | | 1 | 184 | 4646.00 | 0.22 | 8.82 | 17.11 | 4.96 | 7.28 | 0.11 | 0.00 | 39.26 | 48.00 | 49.24 | 51.92 | 38.51 | | 1 | 192 | 4646.00 | 0.21 | 9.83 | 17.98 | 4.81 | 7.38 | 0.12 | 0.00 | 40.34 | 51.41 | 53.30 | 57.10 | 40.33 | | 1 | 200 | 4809.00 | 0.26 | 8.54 | 19.52 | 4.86 | 7.26 | 0.11 | 0.00 | 40.81 | 50.18 | 51.57 | 56.27 | 40.54 | | 1 | 208 | 4866.00 | 0.33 | 8.25 | 20.32 | 5.10 | 7.85 | 0.12 | 0.00 | 42.63 | 51.31 | 52.64 | 54.30 | 41.96 | | 1 | 216 | 4912.00 | 0.40 | 7.34 | 22.29 | 5.12 | 7.78 | 0.12 | 0.00 | 42.34 | 53.43 | 55.42 | 58.20 | 43.04 | | 1 | 224 | 4927.00 | 0.30 | 9.04 | 21.42 | 5.29 | 7.87 | 0.12 | 0.00 | 43.46 | 55.32 | 57.61 | 61.31 | 44.04 | | 1 | 232 | 4840.00 | 0.26 | 12.65 | 20.39 | 5.44 | 7.89 | 0.12 | 0.00 | 47.21 | 58.24 | 62.56 | 76.19 | 46.76 | | 1 | 240 | 5044.00 | 0.35 | 10.44 | 22.00 | 5.46 | 7.97 | 0.12 | 0.00 | 46.40 | 55.91 | 58.63 | 62.81 | 46.35 | | 1 | 248 | 4955.00 | 0.32 | 12.14 | 22.27 | 5.39 | 8.04 | 0.13 | 0.00 | 47.10 | 62.52 | 65.31 | 69.14 | 48.29 | | 1 | 256 | 5236.00 | 0.52 | 7.19 | 26.54 | 5.02 | 8.37 | 0.14 | 0.00 | 48.18 | 55.77 | 57.99 | 63.11 | 47.79 | </details> #### Online: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Backend |ONNX Runtime | | Backend accelerator |NVIDIA TensorRT| | Precision |FP16 | | Model format |ONNX | | Max batch size |64 | | Number of 
model instances |2| | Export Format | ONNX | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_online_2/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 8 | 1646.00 | 0.04 | 0.37 | 3.01 | 0.13 | 1.29 | 0.01 | 0.00 | 4.78 | 5.51 | 5.60 | 5.77 | 4.85 | | 1 | 16 | 2536.00 | 0.07 | 0.89 | 3.46 | 0.29 | 1.56 | 0.01 | 0.00 | 6.38 | 7.26 | 7.64 | 8.40 | 6.29 | | 1 | 24 | 3223.00 | 0.06 | 1.29 | 3.85 | 0.50 | 1.70 | 0.02 | 0.00 | 7.43 | 9.07 | 9.41 | 10.36 | 7.42 | | 1 | 32 | 3705.00 | 0.06 | 1.88 | 4.12 | 0.70 | 1.82 | 0.02 | 0.00 | 8.60 | 10.84 | 11.17 | 11.94 | 8.61 | | 1 | 40 | 4120.00 | 0.06 | 2.18 | 4.64 | 0.83 | 1.92 | 0.03 | 0.00 | 9.84 | 11.98 | 12.41 | 13.11 | 9.66 | | 1 | 48 | 4495.00 | 0.06 | 2.78 | 4.79 | 0.98 | 1.99 | 0.03 | 0.00 | 10.92 | 12.96 | 13.42 | 14.64 | 10.64 | | 1 | 56 | 4858.14 | 0.07 | 2.80 | 5.19 | 1.20 | 2.18 | 0.04 | 0.00 | 11.51 | 14.78 | 15.48 | 16.59 | 11.49 | | 1 | 64 | 5222.78 | 0.07 | 3.20 | 5.40 | 1.24 | 2.26 | 0.04 | 0.00 | 12.51 | 15.17 | 16.02 | 17.54 | 12.21 | | 1 | 72 | 5323.00 | 0.07 | 3.92 | 5.55 | 1.43 | 2.42 | 0.05 | 0.00 | 13.82 | 17.24 | 17.90 | 19.73 | 13.44 | | 1 | 80 | 5826.00 | 0.06 | 3.55 | 6.06 | 1.41 | 2.52 | 0.06 | 0.00 | 13.85 | 17.40 | 18.55 | 19.70 | 13.66 | | 1 | 88 | 5747.25 | 0.06 | 4.85 | 5.96 | 1.62 | 2.61 | 0.06 | 0.00 | 15.63 | 19.59 | 20.38 | 21.87 | 15.17 | | 1 | 96 | 5883.00 | 0.08 | 4.42 | 6.99 | 1.96 | 2.68 | 0.07 | 0.00 | 16.41 | 20.70 | 21.62 | 25.46 | 16.20 | | 1 | 104 | 6167.00 | 0.07 | 4.41 | 7.05 | 2.24 | 2.91 | 0.08 | 0.00 | 16.78 | 21.72 | 22.90 | 24.28 | 16.76 | | 1 | 112 | 6117.00 | 0.07 | 4.89 | 7.27 | 2.52 | 3.22 | 0.09 | 0.00 | 18.58 | 22.70 | 23.52 | 25.27 | 18.07 | | 1 | 120 | 6635.00 | 0.08 | 4.06 | 8.29 | 2.36 | 3.07 | 0.08 | 0.00 | 18.16 | 22.76 | 24.16 | 26.61 | 17.94 | | 1 | 128 | 6457.00 | 0.08 | 5.64 | 7.93 | 2.63 | 3.24 | 0.10 | 0.00 | 19.73 | 26.09 | 26.80 | 27.30 | 19.62 | | 1 | 136 | 6808.19 | 0.08 | 4.58 | 9.03 | 2.72 | 3.33 | 0.10 | 0.00 | 20.04 | 25.08 | 26.65 | 28.96 | 19.84 | | 1 | 144 | 6703.00 | 0.07 | 6.09 | 8.24 | 3.12 | 3.60 | 0.12 | 0.00 | 21.88 | 26.14 | 27.44 | 28.78 | 21.24 | | 1 | 152 | 7450.00 | 0.09 | 3.81 | 10.14 | 2.45 | 3.56 | 0.12 | 0.00 | 20.27 | 25.02 | 26.31 | 28.84 | 20.17 | | 1 | 160 | 7214.78 | 0.08 | 5.87 | 9.28 | 2.75 | 3.80 | 0.12 | 0.00 | 21.97 | 27.62 | 29.16 | 30.83 | 21.89 | | 1 | 168 | 7368.00 | 0.08 | 6.10 | 9.50 | 2.79 | 3.85 | 0.13 | 0.00 | 22.92 | 27.76 | 29.00 | 30.60 | 22.45 | | 1 | 176 | 7483.00 | 0.08 | 5.84 | 10.45 | 2.96 | 3.74 | 0.13 | 0.00 | 23.57 | 28.50 | 30.22 | 33.26 | 23.19 | | 1 | 184 | 7559.00 | 0.08 | 6.50 | 10.21 | 3.18 | 4.00 | 0.13 | 0.00 | 24.17 | 29.87 | 30.93 | 33.18 | 24.10 | | 1 | 192 | 7587.00 | 0.08 | 
6.60 | 10.78 | 3.27 | 4.01 | 0.14 | 0.00 | 25.20 | 30.48 | 31.67 | 34.83 | 24.88 | | 1 | 200 | 7490.00 | 0.08 | 7.83 | 10.70 | 3.39 | 4.11 | 0.14 | 0.00 | 26.94 | 31.98 | 33.71 | 35.97 | 26.24 | | 1 | 208 | 7731.00 | 0.09 | 6.91 | 11.96 | 3.45 | 4.03 | 0.14 | 0.00 | 26.95 | 32.35 | 33.63 | 36.61 | 26.57 | | 1 | 216 | 7735.00 | 0.09 | 7.30 | 11.76 | 3.62 | 4.57 | 0.16 | 0.00 | 27.36 | 34.09 | 35.66 | 37.99 | 27.51 | | 1 | 224 | 8244.00 | 0.09 | 6.21 | 12.52 | 3.24 | 4.47 | 0.15 | 0.00 | 26.44 | 32.87 | 34.35 | 37.15 | 26.69 | | 1 | 232 | 8148.00 | 0.12 | 6.22 | 13.63 | 3.41 | 4.48 | 0.16 | 0.00 | 28.24 | 34.21 | 35.99 | 39.36 | 28.03 | | 1 | 240 | 7768.23 | 0.09 | 10.38 | 12.38 | 3.26 | 4.12 | 0.14 | 0.00 | 29.42 | 40.59 | 42.10 | 44.21 | 30.37 | | 1 | 248 | 8296.00 | 0.12 | 6.08 | 14.71 | 3.78 | 4.54 | 0.16 | 0.00 | 29.78 | 34.53 | 35.91 | 37.65 | 29.40 | | 1 | 256 | 8153.00 | 0.09 | 7.73 | 14.47 | 3.82 | 4.72 | 0.16 | 0.00 | 31.49 | 37.20 | 38.42 | 41.19 | 30.99 | </details> ## Advanced | Inference runtime | Mnemonic used in scripts | |-------------------|--------------------------| | [TorchScript Tracing](https://pytorch.org/docs/stable/jit.html) | `ts-trace` | | [TorchScript Scripting](https://pytorch.org/docs/stable/jit.html) | `ts-script` | | [ONNX](https://onnx.ai) | `onnx` | | [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` | ### Step by step deployment process Commands described below can be used for exporting, converting and profiling the model. #### Clone Repository IMPORTANT: This step is executed on the host computer. <details> <summary>Clone Repository Command</summary> ```shell git clone https://github.com/NVIDIA/DeepLearningExamples.git cd PyTorch/Classification/GPUNet ``` </details> #### Start Triton Inference Server Setup the environment in the host computer and start Triton Inference Server. <details> <summary>Setup Environment and Start Triton Inference Server Command</summary> ```shell source ./triton/scripts/setup_environment.sh ./triton/scripts/docker/triton_inference_server.sh ``` </details> #### Prepare Dataset. Please use the data download from the [Main QSG](../../README.md#prepare-the-dataset) #### Prepare Checkpoint Please download a checkpoint from [here](https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_p0_pyt_ckpt/versions/21.12.0_amp/zip) and place it in `runner_workspace/checkpoints/0.5ms-D/`. Note that the `0.5ms-D` subdirectory may not be created yet. #### Setup Container Build and run a container that extends the NGC PyTorch container with the Triton Inference Server client libraries and dependencies. <details> <summary>Setup Container Command</summary> Build container: ```shell ./triton/scripts/docker/build.sh ``` Run container in interactive mode: ```shell ./triton/scripts/docker/interactive.sh /path/to/imagenet/val/ ``` Setup environment in order to share artifacts in steps and with Triton Inference Server: ```shell source ./triton/scripts/setup_environment.sh ``` </details> #### Prepare configuration You can use the environment variables to set the parameters of your inference configuration. 
Example values of some key variables in one configuration: <details> <summary>Export Variables</summary> ```shell export FORMAT="onnx" export PRECISION="fp16" export EXPORT_FORMAT="onnx" export EXPORT_PRECISION="fp16" export BACKEND_ACCELERATOR="trt" export NUMBER_OF_MODEL_INSTANCES="2" export TENSORRT_CAPTURE_CUDA_GRAPH="0" export CHECKPOINT="0.5ms-D" export CHECKPOINT_DIR=${CHECKPOINTS_DIR}/${CHECKPOINT} ``` </details> #### Export Model Export model from Python source to desired format (e.g. Savedmodel or TorchScript) <details> <summary>Export Model Command</summary> ```shell if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then export FORMAT_SUFFIX="pt" else export FORMAT_SUFFIX="${EXPORT_FORMAT}" fi python3 triton/export_model.py \ --input-path triton/model.py \ --input-type pyt \ --output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-type ${EXPORT_FORMAT} \ --ignore-unknown-parameters \ --onnx-opset 13 \ --torch-jit none \ \ --config /workspace/gpunet/configs/batch1/GV100/0.5ms-D.json \ --checkpoint ${CHECKPOINT_DIR}/0.5ms-D.pth.tar \ --precision ${EXPORT_PRECISION} \ \ --dataloader triton/dataloader.py \ --val-path ${DATASETS_DIR}/ \ --is-prunet True \ --batch-size 1 ``` </details> #### Convert Model Convert the model from training to inference format (e.g. TensorRT). <details> <summary>Convert Model Command</summary> ```shell if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then export FORMAT_SUFFIX="pt" else export FORMAT_SUFFIX="${EXPORT_FORMAT}" fi model-navigator convert \ --model-name ${MODEL_NAME} \ --model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-path ${SHARED_DIR}/converted_model \ --target-formats ${FORMAT} \ --target-precisions ${PRECISION} \ --launch-mode local \ --override-workspace \ --verbose \ \ --onnx-opsets 13 \ --max-batch-size 64 \ --container-version 21.12 \ --max-workspace-size 10000000000 \ --atol OUTPUT__0=100 \ --rtol OUTPUT__0=100 ``` </details> #### Deploy Model Configure the model on Triton Inference Server. Generate the configuration from your model repository. <details> <summary>Deploy Model Command</summary> ```shell model-navigator triton-config-model \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --model-version 1 \ --model-path ${SHARED_DIR}/converted_model \ --model-format ${FORMAT} \ --model-control-mode explicit \ --load-model \ --load-model-timeout-s 100 \ --verbose \ \ --backend-accelerator ${BACKEND_ACCELERATOR} \ --tensorrt-precision ${PRECISION} \ --tensorrt-capture-cuda-graph \ --tensorrt-max-workspace-size 10000000000 \ --max-batch-size 64 \ --batching dynamic \ --preferred-batch-sizes 64 \ --engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES} ``` </details> #### Triton Performance Offline Test We want to maximize throughput. It assumes you have your data available for inference or that your data saturate to maximum batch size quickly. Triton Inference Server supports offline scenarios with static batching. Static batching allows inference requests to be served as they are received. The largest improvements to throughput come from increasing the batch size due to efficiency gains in the GPU with larger batches. 
<details> <summary>Triton Performance Offline Test Command</summary> ```shell python triton/run_performance_on_triton.py \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --input-data random \ --batch-sizes 1 2 4 8 16 32 64 \ --concurrency 1 \ --evaluation-mode offline \ --measurement-request-count 10 \ --warmup \ --performance-tool perf_analyzer \ --result-path ${SHARED_DIR}/triton_performance_offline.csv ``` </details> #### Triton Performance Online Test We want to maximize throughput within latency budget constraints. Dynamic batching is a feature of Triton Inference Server that allows inference requests to be combined by the server, so that a batch is created dynamically, resulting in a reduced average latency. <details> <summary>Triton Performance Online Test</summary> ```shell python triton/run_performance_on_triton.py \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --input-data random \ --batch-sizes 1 \ --concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \ --evaluation-mode online \ --measurement-request-count 500 \ --warmup \ --performance-tool perf_analyzer \ --result-path ${SHARED_DIR}/triton_performance_online.csv ``` </details> ### Latency explanation A typical Triton Inference Server pipeline can be broken down into the following steps: 1. The client serializes the inference request into a message and sends it to the server (Client Send). 2. The message travels over the network from the client to the server (Network). 3. The message arrives at the server and is deserialized (Server Receive). 4. The request is placed on the queue (Server Queue). 5. The request is removed from the queue and computed (Server Compute). 6. The completed request is serialized in a message and sent back to the client (Server Send). 7. The completed message then travels over the network from the server to the client (Network). 8. The completed message is deserialized by the client and processed as a completed inference request (Client Receive). Generally, for local clients, steps 1-4 and 6-8 will only occupy a small fraction of time, compared to step 5. In distributed systems and online processing where client and server side are connect through network, the send and receive steps might have impact on overall processing performance. In order to analyze the possible bottlenecks the detailed charts are presented in online scenario cases. ## Release Notes We’re constantly refining and improving our performance on AI and HPC workloads even on the same hardware with frequent updates to our software stack. For our latest performance data refer to these pages for [AI](https://developer.nvidia.com/deep-learning-performance-training-inference) and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks. ### Changelog May 2022 - Initial release ### Known issues - There are no known issues with this model.
PyTorch/Classification/ConvNets/efficientnet/training/AMP
AMP
DGX1V-16G_efficientnet-widese-b0_AMP
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision AMP --mode convergence --platform DGX1V-16G /imagenet --workspace ${1:-./} --raport-file raport.json
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/optimizer
optimizer
RMSprop
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. _target_: torch.optim.RMSprop lr: 0.01 alpha: 0.99 eps: 1e-8 weight_decay: 0.0 momentum: 0.0 centered: False
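A minimal sketch of how a Hydra config with a `_target_` like the one above is typically consumed. Whether the TimeSeriesPredictionPlatform trainer calls `hydra.utils.instantiate` exactly this way is an assumption; the trainer code is not part of this record.

```python
# Instantiating the RMSprop config via Hydra; the config dict mirrors the YAML above.
import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf

optimizer_cfg = OmegaConf.create({
    "_target_": "torch.optim.RMSprop",
    "lr": 0.01,
    "alpha": 0.99,
    "eps": 1e-8,
    "weight_decay": 0.0,
    "momentum": 0.0,
    "centered": False,
})

model = torch.nn.Linear(16, 1)
optimizer = instantiate(optimizer_cfg, params=model.parameters())
print(type(optimizer).__name__)  # RMSprop
```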
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/training/AMP
AMP
train_benchmark_8xA100-80G
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \ --cfg config/efficientnet_v2/s_cfg.py \ --mode train_and_eval \ --use_amp \ --use_xla \ --model_dir ./output/ \ --data_dir /data/ \ --log_steps 500 \ --save_checkpoint_freq 10 \ --n_stages 1 \ --max_epochs 3 \ --steps_per_epoch 2000 \ --train_batch_size 460 \ --train_img_size 300 \ --lr_decay cosine \ --lr_init 0.005 \ --weight_decay .000005 \ --opt_epsilon 0.001 \ --moving_average_decay 0.9999 \ --eval_img_size 384 \ --eval_batch_size 128 \ --augmenter_name randaugment \ --raug_num_layers 2 \ --raug_magnitude 15 \ --cutmix_alpha 0 \ --mixup_alpha 0 \ --defer_img_mixing
PyTorch/Segmentation/nnUNet/triton
triton
convert_model
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" `convert_model.py` script allows to convert between model formats with additional model optimizations for faster inference. It converts model from results of get_model function. Currently supported input and output formats are: - inputs - `tf-estimator` - `get_model` function returning Tensorflow Estimator - `tf-keras` - `get_model` function returning Tensorflow Keras Model - `tf-savedmodel` - Tensorflow SavedModel binary - `pyt` - `get_model` function returning PyTorch Module - output - `tf-savedmodel` - Tensorflow saved model - `tf-trt` - TF-TRT saved model - `ts-trace` - PyTorch traced ScriptModule - `ts-script` - PyTorch scripted ScriptModule - `onnx` - ONNX - `trt` - TensorRT plan file For tf-keras input you can use: - --large-model flag - helps loading model which exceeds maximum protobuf size of 2GB - --tf-allow-growth flag - control limiting GPU memory growth feature (https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). By default it is disabled. """ import argparse import logging import os from pathlib import Path os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1" # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import ( DATALOADER_FN_NAME, BaseConverter, BaseLoader, BaseSaver, Format, Precision, load_from_file, ) from .deployment_toolkit.extensions import converters, loaders, savers LOGGER = logging.getLogger("convert_model") INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.TF_SAVEDMODEL, Format.PYT] OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TF_TRT, Format.ONNX, Format.TRT, Format.TS_TRACE, Format.TS_SCRIPT] def _get_args(): parser = argparse.ArgumentParser(description="Script for conversion between model formats.", allow_abbrev=False) parser.add_argument("--input-path", help="Path to input model file (python module or binary file)", required=True) parser.add_argument( "--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True ) parser.add_argument("--output-path", help="Path to output model file", required=True) parser.add_argument( "--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True ) parser.add_argument("--dataloader", help="Path to python module containing data loader") parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) parser.add_argument( "--ignore-unknown-parameters", help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)", action="store_true", default=False, ) args, unparsed_args = parser.parse_known_args() Loader: BaseLoader = loaders.get(args.input_type) ArgParserGenerator(Loader, 
module_path=args.input_path).update_argparser(parser) converter_name = f"{args.input_type}--{args.output_type}" Converter: BaseConverter = converters.get(converter_name) if Converter is not None: ArgParserGenerator(Converter).update_argparser(parser) Saver: BaseSaver = savers.get(args.output_type) ArgParserGenerator(Saver).update_argparser(parser) if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) if args.ignore_unknown_parameters: args, unknown_args = parser.parse_known_args() LOGGER.warning(f"Got additional args {unknown_args}") else: args = parser.parse_args() return args def main(): args = _get_args() log_level = logging.INFO if not args.verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) LOGGER.info(f"args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") requested_model_precision = Precision(args.precision) dataloader_fn = None # if conversion is required, temporary change model load precision to that required by converter # it is for TensorRT converters which require fp32 models for all requested precisions converter_name = f"{args.input_type}--{args.output_type}" Converter: BaseConverter = converters.get(converter_name) if Converter: args.precision = Converter.required_source_model_precision(requested_model_precision).value Loader: BaseLoader = loaders.get(args.input_type) loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args) model = loader.load(args.input_path) LOGGER.info("inputs: %s", model.inputs) LOGGER.info("outputs: %s", model.outputs) if Converter: # if conversion is needed # dataloader must much source model precision - so not recovering it yet if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) # recover precision to that requested by user args.precision = requested_model_precision.value if Converter: converter = ArgParserGenerator(Converter).from_args(args) model = converter.convert(model, dataloader_fn=dataloader_fn) Saver: BaseSaver = savers.get(args.output_type) saver = ArgParserGenerator(Saver).from_args(args) saver.save(model, args.output_path) return 0 if __name__ == "__main__": main()
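The `--dataloader` option above expects a Python module exposing an entry point named by `DATALOADER_FN_NAME`. A hypothetical sketch of such a module follows; the function name `get_dataloader_fn` and the `(ids, inputs, outputs)` batch layout are assumptions, since `deployment_toolkit/core.py` is not included in this record.

```python
# Hypothetical dataloader module for --dataloader; the expected entry-point
# name and yielded batch structure are assumptions, not taken from this record.
import numpy as np


def get_dataloader_fn(batch_size: int = 1, data_size: int = 8):
    def _dataloader():
        for start in range(0, data_size, batch_size):
            ids = np.arange(start, min(start + batch_size, data_size))
            images = np.random.rand(len(ids), 3, 224, 224).astype(np.float32)
            yield ids, {"INPUT__0": images}, None  # no ground-truth outputs
    return _dataloader
```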
TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact
tensorflow-dot-based-interact
setup
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Setup for pip package.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from setuptools import Extension from setuptools import find_packages from setuptools import setup from setuptools.dist import Distribution __version__ = '0.0.1' REQUIRED_PACKAGES = [ 'tensorflow >= 2.3.1', ] project_name = 'tensorflow-dot-based-interact' from setuptools.command.install import install class InstallPlatlib(install): def finalize_options(self): install.finalize_options(self) self.install_lib = self.install_platlib class BinaryDistribution(Distribution): """This class is needed in order to create OS specific wheels.""" def has_ext_modules(self): return True def is_pure(self): return False setup( name=project_name, version=__version__, description=('tensorflow-dot-based-interact is a CUDA Dot Based Interact custom op for TensorFlow'), author='NVIDIA Corporation', author_email='info@nvidia.com', # Contained modules and scripts. packages=find_packages(), install_requires=REQUIRED_PACKAGES, # Add in any packaged data. include_package_data=True, zip_safe=False, distclass=BinaryDistribution, cmdclass={'install': InstallPlatlib}, # PyPI package information. classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.8', 'Topic :: Scientific/Engineering :: Mathematics', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: Software Development :: Libraries', ], license='Apache 2.0', keywords='tensorflow custom op machine learning', )
PyTorch/Classification/GPUNet/triton/175ms/runner
runner
pipeline_impl
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ...runner.pipeline import Pipeline pipeline = Pipeline() pipeline.model_export( commands=( r""" if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then export FORMAT_SUFFIX="pt" else export FORMAT_SUFFIX="${EXPORT_FORMAT}" fi python3 triton/export_model.py \ --input-path triton/model.py \ --input-type pyt \ --output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-type ${EXPORT_FORMAT} \ --ignore-unknown-parameters \ --onnx-opset 13 \ --torch-jit ${TORCH_JIT} \ \ --config /workspace/gpunet/configs/batch1/GV100/1.75ms.json \ --checkpoint ${CHECKPOINT_DIR}/1.75ms.pth.tar \ --precision ${EXPORT_PRECISION} \ \ --dataloader triton/dataloader.py \ --val-path ${DATASETS_DIR}/ \ --is-prunet False \ --batch-size 1 """, ) ) pipeline.model_conversion( commands=( r""" if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then export FORMAT_SUFFIX="pt" else export FORMAT_SUFFIX="${EXPORT_FORMAT}" fi model-navigator convert \ --model-name ${MODEL_NAME} \ --model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-path ${SHARED_DIR}/converted_model \ --target-formats ${FORMAT} \ --target-precisions ${PRECISION} \ --launch-mode local \ --override-workspace \ --verbose \ \ --onnx-opsets 13 \ --max-batch-size ${MAX_BATCH_SIZE} \ --container-version 21.12 \ --max-workspace-size 10000000000 \ --atol OUTPUT__0=100 \ --rtol OUTPUT__0=100 """, ) ) pipeline.model_deploy( commands=( r""" model-navigator triton-config-model \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --model-version 1 \ --model-path ${SHARED_DIR}/converted_model \ --model-format ${FORMAT} \ --model-control-mode explicit \ --load-model \ --load-model-timeout-s 100 \ --verbose \ \ --backend-accelerator ${BACKEND_ACCELERATOR} \ --tensorrt-precision ${PRECISION} \ --tensorrt-capture-cuda-graph \ --tensorrt-max-workspace-size 10000000000 \ --max-batch-size ${MAX_BATCH_SIZE} \ --batching ${MODEL_BATCHING} \ --preferred-batch-sizes ${MAX_BATCH_SIZE} \ --engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES} """, ) ) pipeline.triton_performance_offline_tests( commands=( r""" python triton/run_performance_on_triton.py \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --input-data random \ --batch-sizes 1 2 4 8 16 32 64 \ --concurrency 1 \ --evaluation-mode offline \ --measurement-request-count 10 \ --warmup \ --performance-tool perf_analyzer \ --result-path ${SHARED_DIR}/triton_performance_offline.csv """, ), result_path="${SHARED_DIR}/triton_performance_offline.csv", ) pipeline.triton_performance_online_tests( commands=( r""" python triton/run_performance_on_triton.py \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --input-data random \ --batch-sizes 1 \ --concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 
192 200 208 216 224 232 240 248 256 \ --evaluation-mode online \ --measurement-request-count 500 \ --warmup \ --performance-tool perf_analyzer \ --result-path ${SHARED_DIR}/triton_performance_online.csv """, ), result_path="${SHARED_DIR}/triton_performance_online.csv", )
Tools/DGLPyTorch/SyntheticGraphGeneration/scripts
scripts
ieee_fraud
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import pandas as pd import numpy as np from pathlib import Path if __name__ == '__main__': data_path = sys.argv[1] # - path containing ieee-fraud-detection data # https://www.kaggle.com/competitions/ieee-fraud-detection data_path = Path(data_path) # - concat data files train_trn = pd.read_csv(data_path / 'train_transaction.csv') test_trn = pd.read_csv(data_path / 'test_transaction.csv') # - not every transactionID has an associated transaction identification ... data = pd.concat([train_trn, test_trn], axis=0) user_cols = ['addr1', 'addr2', 'card1', 'card2', 'card3', 'card4', 'card5', 'card6'] # - product columns that can be used to create unique id product_cols = ['ProductCD', 'R_emaildomain'] for c in user_cols: data.loc[:, c] = data[c].fillna('').astype(str) for c in product_cols: data.loc[:, c] = data[c].fillna('').astype(str) data['user_id'] = '' user_cols_selected = ['card1'] # - select only card1 for c in user_cols_selected: data.loc[:, 'user_id'] = data['user_id'] + data[c] data['product_id'] = '' for c in product_cols: data.loc[:, 'product_id'] = data['product_id'] + data[c] # - drop id cols data.drop(columns=user_cols + product_cols, inplace=True) # - select last transaction data = data.sort_values('TransactionDT').groupby(['user_id', 'product_id']).tail(1) # - dump data save_path = os.path.join(data_path, 'data.csv') data.to_csv(save_path, index=False)
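The script above builds composite `user_id`/`product_id` keys and then keeps only each pair's most recent transaction. A minimal toy sketch of that deduplication step (hypothetical values, standard pandas only):

```python
import pandas as pd

df = pd.DataFrame({
    "user_id": ["u1", "u1", "u2"],
    "product_id": ["pA", "pA", "pB"],
    "TransactionDT": [100, 200, 150],
})

# Sort by time, then keep the last (most recent) row of every (user_id, product_id) group.
latest = df.sort_values("TransactionDT").groupby(["user_id", "product_id"]).tail(1)
print(latest)  # keeps the TransactionDT=200 row for (u1, pA) and the single (u2, pB) row
```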
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/data_transformer
data_transformer
base_data_transformer
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC class BaseDataTransformer(ABC): """Base class for all data transformers. The `BaseDataTransformer` provides the transformation required by generators to transform (encode) and inverse_transform (decode) data. It contains the `fit`, `transform`, `inverse_transform`, and `get_metadata` functions that must be implemented by specific data transformer objects. """ def fit(self, data): """Fits the data transform to the data. This is optional Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to transform. Returns: None """ pass def transform(self, data): """Transform the data. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to transform. Returns: numpy.array: Transformed data. """ raise NotImplementedError() def fit_transform(self, data): """Fit to the data and then return the transformed data. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to fit and transform Returns: Transformed data. """ self.fit(data) return self.transform(data) def inverse_transform(self, data): """Reverses the transformation done on the data back to original values. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to inverse-transform. Returns: raw_data: inverse transformed data """ raise NotImplementedError()
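A hypothetical concrete transformer, sketched here only to illustrate the `fit`/`transform`/`inverse_transform` contract defined by `BaseDataTransformer` above; the class name and the min-max scaling scheme are illustrative and not part of the package:

```python
import numpy as np

class MinMaxTransformer(BaseDataTransformer):
    """Illustrative min-max scaler; assumes 1-D numeric input."""

    def fit(self, data):
        data = np.asarray(data, dtype=np.float64)
        self.min_ = float(data.min())
        self.max_ = float(data.max())

    def transform(self, data):
        data = np.asarray(data, dtype=np.float64)
        return (data - self.min_) / (self.max_ - self.min_ + 1e-12)

    def inverse_transform(self, data):
        data = np.asarray(data, dtype=np.float64)
        return data * (self.max_ - self.min_ + 1e-12) + self.min_

encoded = MinMaxTransformer().fit_transform(np.array([1.0, 5.0, 9.0]))  # ~[0.0, 0.5, 1.0]
```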
TensorFlow2/LanguageModeling/BERT/official/utils/misc
misc
tpu_lib
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Initializes TPU system for TF 2.0.""" import tensorflow as tf def tpu_initialize(tpu_address): """Initializes TPU for TF 2.0 training. Args: tpu_address: string, bns address of master TPU worker. Returns: A TPUClusterResolver. """ cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu=tpu_address) if tpu_address not in ('', 'local'): tf.config.experimental_connect_to_cluster(cluster_resolver) tf.tpu.experimental.initialize_tpu_system(cluster_resolver) return cluster_resolver
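A hedged usage sketch for the helper above: the resolver it returns is typically wrapped in a TPU distribution strategy. The exact location of `TPUStrategy` depends on the TF 2.x minor version (it lived under `tf.distribute.experimental` in early 2.x releases):

```python
import tensorflow as tf

resolver = tpu_initialize(tpu_address="")  # "" or "local" for a directly attached TPU
strategy = tf.distribute.experimental.TPUStrategy(resolver)

with strategy.scope():
    # Any Keras model built inside the scope is replicated across TPU cores.
    model = tf.keras.Sequential([tf.keras.layers.Dense(10)])
    model.compile(optimizer="adam", loss="mse")
```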
TensorFlow2/Classification/ConvNets/efficientnet_v1/B4/inference
inference
inference_AMP
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python3 main.py --cfg config/efficientnet_v1/b4_cfg.py \ --mode predict \ --use_amp \ --use_xla \ --predict_ckpt /model \ --predict_img_dir /infer_data \ --predict_batch_size 50 \ --predict_img_size 380
PyTorch/Recommendation/DLRM
DLRM
requirements_preprocessing
numpy pandas joblib==0.16 tqdm
TensorFlow/Segmentation/UNet_Medical/tf_exports
tf_exports
tf_export
import glob import inspect import os import shutil import subprocess from typing import List, Callable import tensorflow as tf from google.protobuf import text_format from tensorflow.core.framework import graph_pb2 from tensorflow.python.compiler.tensorrt import trt_convert as trt from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_io from tensorflow.python.platform import gfile from tensorflow.python.tools import optimize_for_inference_lib os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" def _compress(src_path: str, dst_path: str): """ Compress source path into destination path :param src_path: (str) Source path :param dst_path: (str) Destination path """ print('[*] Compressing...') shutil.make_archive(dst_path, 'zip', src_path) print('[*] Compressed the contents in: {}.zip'.format(dst_path)) def _print_input(func: Callable): """ Decorator printing function name and args :param func: (Callable) Decorated function :return: Wrapped call """ def wrapper(*args, **kwargs): """ Print the name and arguments of a function :param args: Named arguments :param kwargs: Keyword arguments :return: Original function call """ tf.logging.set_verbosity(tf.logging.ERROR) func_args = inspect.signature(func).bind(*args, **kwargs).arguments func_args_str = ''.join('\t{} = {!r}\n'.format(*item) for item in func_args.items()) print('[*] Running \'{}\' with arguments:'.format(func.__qualname__)) print(func_args_str[:-1]) return func(*args, **kwargs) return wrapper def _parse_placeholder_types(values: str): """ Extracts placeholder types from a comma separate list. :param values: (str) Placeholder types :return: (List) Placeholder types """ values = [int(value) for value in values.split(",")] return values if len(values) > 1 else values[0] def _optimize_checkpoint_for_inference(graph_path: str, input_names: List[str], output_names: List[str]): """ Removes Horovod and training related information from the graph :param graph_path: (str) Path to the graph.pbtxt file :param input_names: (str) Input node names :param output_names: (str) Output node names """ print('[*] Optimizing graph for inference ...') input_graph_def = graph_pb2.GraphDef() with gfile.Open(graph_path, "rb") as f: data = f.read() text_format.Merge(data.decode("utf-8"), input_graph_def) output_graph_def = optimize_for_inference_lib.optimize_for_inference( input_graph_def, input_names, output_names, _parse_placeholder_types(str(dtypes.float32.as_datatype_enum)), False) print('[*] Saving original graph in: {}'.format(graph_path + '.old')) shutil.move(graph_path, graph_path + '.old') print('[*] Writing down optimized graph ...') graph_io.write_graph(output_graph_def, os.path.dirname(graph_path), os.path.basename(graph_path)) @_print_input def to_savedmodel(input_shape: str, model_fn: Callable, checkpoint_dir: str, output_dir: str, input_names: List[str], output_names: List[str], use_amp: bool, use_xla: bool, compress: bool): """ Export checkpoint to Tensorflow savedModel :param input_shape: (str) Input shape to the model in format [batch, height, width, channels] :param model_fn: (Callable) Estimator's model_fn :param checkpoint_dir: (str) Directory where checkpoints are stored :param output_dir: (str) Output directory for storage of the generated savedModel :param input_names: (List[str]) Input node names :param output_names: (List[str]) Output node names :param use_amp: (bool )Enable TF-AMP :param use_xla: (bool) Enable XLA :param compress: (bool) Compress output """ assert os.path.exists(checkpoint_dir), 'Path not 
found: {}'.format(checkpoint_dir) assert input_shape is not None, 'Input shape must be provided' _optimize_checkpoint_for_inference(os.path.join(checkpoint_dir, 'graph.pbtxt'), input_names, output_names) try: ckpt_path = os.path.splitext([p for p in glob.iglob(os.path.join(checkpoint_dir, '*.index'))][0])[0] except IndexError: raise ValueError('Could not find checkpoint in directory: {}'.format(checkpoint_dir)) config_proto = tf.compat.v1.ConfigProto() config_proto.allow_soft_placement = True config_proto.log_device_placement = False config_proto.gpu_options.allow_growth = True config_proto.gpu_options.force_gpu_compatible = True if use_amp: os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1" if use_xla: config_proto.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1 run_config = tf.estimator.RunConfig( model_dir=None, tf_random_seed=None, save_summary_steps=1e9, # disabled save_checkpoints_steps=None, save_checkpoints_secs=None, session_config=config_proto, keep_checkpoint_max=None, keep_checkpoint_every_n_hours=1e9, # disabled log_step_count_steps=1e9, train_distribute=None, device_fn=None, protocol=None, eval_distribute=None, experimental_distribute=None ) estimator = tf.estimator.Estimator( model_fn=model_fn, model_dir=ckpt_path, config=run_config, params={'dtype': tf.float16 if use_amp else tf.float32} ) print('[*] Exporting the model ...') input_type = tf.float16 if use_amp else tf.float32 def get_serving_input_receiver_fn(): def serving_input_receiver_fn(): features = tf.placeholder(dtype=input_type, shape=input_shape, name='input_tensor') return tf.estimator.export.TensorServingInputReceiver(features=features, receiver_tensors=features) return serving_input_receiver_fn export_path = estimator.export_saved_model( export_dir_base=output_dir, serving_input_receiver_fn=get_serving_input_receiver_fn(), checkpoint_path=ckpt_path ) print('[*] Done! path: `%s`' % export_path.decode()) if compress: _compress(export_path.decode(), os.path.join(output_dir, 'saved_model')) @_print_input def to_tf_trt(savedmodel_dir: str, output_dir: str, precision: str, feed_dict_fn: Callable, num_runs: int, output_tensor_names: List[str], compress: bool): """ Export Tensorflow savedModel to TF-TRT :param savedmodel_dir: (str) Input directory containing a Tensorflow savedModel :param output_dir: (str) Output directory for storage of the generated TF-TRT exported model :param precision: (str) Desired precision of the network (FP32, FP16 or INT8) :param feed_dict_fn: (Callable) Input tensors for INT8 calibration. Model specific. :param num_runs: (int) Number of calibration runs. :param output_tensor_names: (List) Name of the output tensor for graph conversion. Model specific. :param compress: (bool) Compress output """ if savedmodel_dir is None or not os.path.exists(savedmodel_dir): raise FileNotFoundError('savedmodel_dir not found: {}'.format(savedmodel_dir)) if os.path.exists(output_dir): print('[*] Output dir \'{}\' is not empty. Cleaning up ...'.format(output_dir)) shutil.rmtree(output_dir) print('[*] Converting model...') converter = trt.TrtGraphConverter(input_saved_model_dir=savedmodel_dir, precision_mode=precision) converter.convert() if precision == 'INT8': print('[*] Running INT8 calibration ...') converter.calibrate(fetch_names=output_tensor_names, num_runs=num_runs, feed_dict_fn=feed_dict_fn) converter.save(output_dir) print('[*] Done! 
TF-TRT saved_model stored in: `%s`' % output_dir) if compress: _compress(output_dir, 'tftrt_saved_model') @_print_input def to_onnx(input_dir: str, output_dir: str, compress: bool): """ Convert Tensorflow savedModel to ONNX with tf2onnx :param input_dir: (str) Input directory with a Tensorflow savedModel :param output_dir: (str) Output directory where to store the ONNX version of the model :param compress: (bool) Compress output """ if not os.path.exists(output_dir): os.makedirs(output_dir) file_name = os.path.join(output_dir, 'model.onnx') print('[*] Converting model...') ret = subprocess.call(['python', '-m', 'tf2onnx.convert', '--saved-model', input_dir, '--output', file_name], stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT) if ret > 0: raise RuntimeError('tf2onnx.convert has failed with error: {}'.format(ret)) print('[*] Done! ONNX file stored in: %s' % file_name) if compress: _compress(output_dir, 'onnx_model')
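A hedged usage sketch for the export helpers above; all paths and the output tensor name are placeholders, and `feed_dict_fn` is only needed for INT8 calibration:

```python
# Export a SavedModel to ONNX, then build a TF-TRT FP16 engine from the same SavedModel.
to_onnx(input_dir="/results/saved_model", output_dir="/results/onnx", compress=False)

to_tf_trt(
    savedmodel_dir="/results/saved_model",
    output_dir="/results/tftrt_saved_model",
    precision="FP16",
    feed_dict_fn=None,                 # required only when precision == 'INT8'
    num_runs=1,
    output_tensor_names=["logits:0"],  # model specific, hypothetical name
    compress=False,
)
```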
PyTorch/DrugDiscovery/MoFlow/moflow/runtime
runtime
train
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2020 Chengxi Zang # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. import argparse import functools import json import logging import os import signal from typing import Dict from apex.contrib.clip_grad import clip_grad_norm_ from apex.optimizers import FusedAdam as Adam import torch from torch.cuda.amp import autocast, GradScaler from torch.utils.data.distributed import DistributedSampler from moflow.config import CONFIGS, Config from moflow.data.data_loader import NumpyTupleDataset from moflow.data import transform from moflow.model.model import MoFlow, MoFlowLoss from moflow.model.utils import initialize from moflow.runtime.logger import MetricsLogger, PerformanceLogger, setup_logging from moflow.runtime.arguments import PARSER from moflow.runtime.common import get_newest_checkpoint, load_state, save_state from moflow.runtime.distributed_utils import ( get_device, get_rank, get_world_size, init_distributed, reduce_tensor ) from moflow.runtime.generate import infer from moflow.utils import check_validity, convert_predictions_to_mols torch._C._jit_set_autocast_mode(True) def run_validation(model: MoFlow, config: Config, ln_var: float, args: argparse.Namespace, is_distributed: bool, world_size: int, device: torch.device) -> Dict[str, float]: model.eval() if is_distributed: model_callable = model.module else: model_callable = model result = infer(model_callable, config, device=device, ln_var=ln_var, batch_size=args.val_batch_size, temp=args.temperature) mols = convert_predictions_to_mols(*result, correct_validity=args.correct_validity) validity_info = check_validity(mols) valid_ratio = torch.tensor(validity_info['valid_ratio'], dtype=torch.float32, device=device) unique_ratio = torch.tensor(validity_info['unique_ratio'], dtype=torch.float32, device=device) valid_value = reduce_tensor(valid_ratio, world_size).detach().cpu().numpy() unique_value = reduce_tensor(unique_ratio, 
world_size).detach().cpu().numpy() model.train() return {'valid': valid_value, 'unique': unique_value} def train(args: argparse.Namespace) -> None: os.makedirs(args.results_dir, exist_ok=True) # Device configuration device = get_device(args.local_rank) torch.cuda.set_stream(torch.cuda.Stream()) is_distributed = init_distributed() world_size = get_world_size() local_rank = get_rank() logger = setup_logging(args) if local_rank == 0: perf_logger = PerformanceLogger(logger, args.batch_size * world_size, args.warmup_steps) acc_logger = MetricsLogger(logger) if local_rank == 0: logging.info('Input args:') logging.info(json.dumps(vars(args), indent=4, separators=(',', ':'))) # Model configuration assert args.config_name in CONFIGS config = CONFIGS[args.config_name] data_file = config.dataset_config.dataset_file transform_fn = functools.partial(transform.transform_fn, config=config) valid_idx = transform.get_val_ids(config, args.data_dir) if local_rank == 0: logging.info('Config:') logging.info(str(config)) model = MoFlow(config) model.to(device) loss_module = MoFlowLoss(config) loss_module.to(device) # Datasets: dataset = NumpyTupleDataset.load( os.path.join(args.data_dir, data_file), transform=transform_fn, ) if len(valid_idx) == 0: raise ValueError('Empty validation set!') train_idx = [t for t in range(len(dataset)) if t not in valid_idx] train = torch.utils.data.Subset(dataset, train_idx) test = torch.utils.data.Subset(dataset, valid_idx) if world_size > 1: sampler = DistributedSampler(train, seed=args.seed, drop_last=False) else: sampler = None train_dataloader = torch.utils.data.DataLoader( train, batch_size=args.batch_size, shuffle=sampler is None, sampler=sampler, num_workers=args.num_workers, drop_last=True, ) if local_rank == 0: logging.info(f'Using {world_size} GPUs') logging.info(f'Num training samples: {len(train)}') logging.info(f'Minibatch-size: {args.batch_size}') logging.info(f'Num Iter/Epoch: {len(train_dataloader)}') logging.info(f'Num epoch: {args.epochs}') if is_distributed: train_dataloader.sampler.set_epoch(-1) x, adj, *_ = next(iter(train_dataloader)) x = x.to(device) adj = adj.to(device) with autocast(enabled=args.amp): initialize(model, (adj, x)) model.to(memory_format=torch.channels_last) adj.to(memory_format=torch.channels_last) if args.jit: model.bond_model = torch.jit.script(model.bond_model) model.atom_model = torch.jit.script(model.atom_model) # make one pass in both directions to make sure that model works with torch.no_grad(): _ = model(adj, x) _ = model.reverse(torch.randn(args.batch_size, config.z_dim, device=device)) if is_distributed: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[local_rank], output_device=local_rank, ) loss_module = torch.nn.parallel.DistributedDataParallel( loss_module, device_ids=[local_rank], output_device=local_rank, ) model_callable = model.module loss_callable = loss_module.module else: model_callable = model loss_callable = loss_module # Loss and optimizer optimizer = Adam((*model.parameters(), *loss_module.parameters()), lr=args.learning_rate, betas=(args.beta1, args.beta2)) scaler = GradScaler() if args.save_epochs == -1: args.save_epochs = args.epochs if args.eval_epochs == -1: args.eval_epochs = args.epochs if args.steps == -1: args.steps = args.epochs * len(train_dataloader) snapshot_path = get_newest_checkpoint(args.results_dir) if snapshot_path is not None: snapshot_epoch, ln_var = load_state(snapshot_path, model_callable, optimizer=optimizer, device=device) loss_callable.ln_var = 
torch.nn.Parameter(torch.tensor(ln_var)) first_epoch = snapshot_epoch + 1 step = first_epoch * len(train_dataloader) else: first_epoch = 0 step = 0 if first_epoch >= args.epochs: logging.info(f'Model was already trained for {first_epoch} epochs') exit(0) for epoch in range(first_epoch, args.epochs): if local_rank == 0: acc_logger.reset() if is_distributed: train_dataloader.sampler.set_epoch(epoch) for i, batch in enumerate(train_dataloader): if local_rank == 0: perf_logger.update() step += 1 optimizer.zero_grad() x = batch[0].to(device) adj = batch[1].to(device=device,memory_format=torch.channels_last) # Forward, backward and optimize with_cuda_graph = ( args.cuda_graph and step >= args.warmup_steps and x.size(0) == args.batch_size ) with autocast(enabled=args.amp, cache_enabled=not with_cuda_graph): output = model(adj, x, with_cuda_graph=with_cuda_graph) nll_x, nll_adj = loss_module(*output) loss = nll_x + nll_adj if args.amp: scaler.scale(loss).backward() scaler.unscale_(optimizer) clip_grad_norm_(model.parameters(), args.clip) scaler.step(optimizer) scaler.update() else: loss.backward() clip_grad_norm_(model.parameters(), args.clip) optimizer.step() # Print log info if (i + 1) % args.log_interval == 0: nll_x_value = reduce_tensor(nll_x, world_size).item() nll_adj_value = reduce_tensor(nll_adj, world_size).item() loss_value = nll_x_value + nll_adj_value if local_rank == 0: acc_logger.update({ 'loglik': loss_value, 'nll_x': nll_x_value, 'nll_adj': nll_adj_value }) acc_logger.summarize(step=(epoch, i, i)) perf_logger.summarize(step=(epoch, i, i)) if step >= args.steps: break if (epoch + 1) % args.eval_epochs == 0: with autocast(enabled=args.amp): metrics = run_validation(model, config, loss_callable.ln_var.item(), args, is_distributed, world_size, device) if local_rank == 0: acc_logger.update(metrics) # The same report for each epoch if local_rank == 0: acc_logger.summarize(step=(epoch,)) perf_logger.summarize(step=(epoch,)) # Save the model checkpoints if (epoch + 1) % args.save_epochs == 0: if local_rank == 0 or not is_distributed: save_state(args.results_dir, model_callable, optimizer, loss_callable.ln_var.item(), epoch, keep=5) if step >= args.steps: break if local_rank == 0: acc_logger.summarize(step=tuple()) perf_logger.summarize(step=tuple()) if __name__ == '__main__': from rdkit import RDLogger RDLogger.DisableLog('rdApp.*') args = PARSER.parse_args() train(args)
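The training loop above follows the standard PyTorch AMP pattern of unscaling gradients before clipping. A minimal self-contained sketch of that ordering with stock PyTorch utilities (the script itself uses apex's fused Adam and `clip_grad_norm_`; a CUDA device is assumed):

```python
import torch

model = torch.nn.Linear(8, 1).cuda()
optimizer = torch.optim.Adam(model.parameters())
scaler = torch.cuda.amp.GradScaler()

x = torch.randn(4, 8, device="cuda")
with torch.cuda.amp.autocast():
    loss = model(x).pow(2).mean()

scaler.scale(loss).backward()
scaler.unscale_(optimizer)                               # bring grads back to fp32 scale
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # clip the unscaled gradients
scaler.step(optimizer)                                   # skips the step if inf/nan grads
scaler.update()
optimizer.zero_grad()
```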
PyTorch/SpeechSynthesis/FastPitch/triton
triton
calculate_metrics
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Using `calculate_metrics.py` script, you can obtain model accuracy/error metrics using defined `MetricsCalculator` class. Data provided to `MetricsCalculator` are obtained from npz dump files stored in directory pointed by `--dump-dir` argument. Above files are prepared by `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts. Output data is stored in csv file pointed by `--csv` argument. Example call: ```shell script python ./triton/calculate_metrics.py \ --dump-dir /results/dump_triton \ --csv /results/accuracy_results.csv \ --metrics metrics.py \ --metric-class-param1 value ``` """ import argparse import csv import logging import string from pathlib import Path import numpy as np # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file from .deployment_toolkit.dump import pad_except_batch_axis LOGGER = logging.getLogger("calculate_metrics") TOTAL_COLUMN_NAME = "_total_" def get_data(dump_dir, prefix): """Loads and concatenates dump files for given prefix (ex. 
inputs, outputs, labels, ids)""" dump_dir = Path(dump_dir) npz_files = sorted(dump_dir.glob(f"{prefix}*.npz")) data = None if npz_files: # assume that all npz files with given prefix contain same set of names names = list(np.load(npz_files[0].as_posix()).keys()) # calculate target shape target_shape = { name: tuple(np.max([np.load(npz_file.as_posix())[name].shape for npz_file in npz_files], axis=0)) for name in names } # pad and concatenate data data = { name: np.concatenate( [pad_except_batch_axis(np.load(npz_file.as_posix())[name], target_shape[name]) for npz_file in npz_files] ) for name in names } return data def main(): logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False) parser.add_argument("--metrics", help=f"Path to python module containing metrics calculator", required=True) parser.add_argument("--csv", help="Path to csv file", required=True) parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True) args, *_ = parser.parse_known_args() MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator") ArgParserGenerator(MetricsCalculator).update_argparser(parser) args = parser.parse_args() LOGGER.info(f"args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator") metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args) ids = get_data(args.dump_dir, "ids")["ids"] x = get_data(args.dump_dir, "inputs") y_true = get_data(args.dump_dir, "labels") y_pred = get_data(args.dump_dir, "outputs") common_keys = list({k for k in (y_true or [])} & {k for k in (y_pred or [])}) for key in common_keys: if y_true[key].shape != y_pred[key].shape: LOGGER.warning( f"Model predictions and labels shall have equal shapes. " f"y_pred[{key}].shape={y_pred[key].shape} != " f"y_true[{key}].shape={y_true[key].shape}" ) metrics = metrics_calculator.calc(ids=ids, x=x, y_pred=y_pred, y_real=y_true) metrics = {TOTAL_COLUMN_NAME: len(ids), **metrics} metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])] if metric_names_with_space: raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}") csv_path = Path(args.csv) csv_path.parent.mkdir(parents=True, exist_ok=True) with csv_path.open("w") as csv_file: writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys())) writer.writeheader() writer.writerow(metrics) if __name__ == "__main__": main()
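The `get_data` helper above pads every non-batch axis to a common target shape before concatenating the per-file dumps, since different dump files may hold different sequence lengths. A toy numpy-only illustration of that idea (independent of the toolkit's `pad_except_batch_axis`):

```python
import numpy as np

a = np.ones((2, 5))                      # batch of 2, sequence length 5
b = np.ones((3, 7))                      # batch of 3, sequence length 7
target_len = max(a.shape[1], b.shape[1])

def pad_to(arr, length):
    # Zero-pad only the trailing (non-batch) axis.
    return np.pad(arr, ((0, 0), (0, length - arr.shape[1])))

merged = np.concatenate([pad_to(a, target_len), pad_to(b, target_len)])
print(merged.shape)                      # (5, 7)
```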
PyTorch/LanguageModeling/BERT/data
data
GooglePretrainedWeightDownloader
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import hashlib import os import urllib.request import zipfile class GooglePretrainedWeightDownloader: def __init__(self, save_path): self.save_path = save_path + '/google_pretrained_weights' if not os.path.exists(self.save_path): os.makedirs(self.save_path) # Download urls self.model_urls = { 'bert_base_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip', 'uncased_L-12_H-768_A-12.zip'), 'bert_large_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip', 'uncased_L-24_H-1024_A-16.zip'), 'bert_base_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip', 'cased_L-12_H-768_A-12.zip'), 'bert_large_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip', 'cased_L-24_H-1024_A-16.zip'), 'bert_base_multilingual_cased': ('https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip', 'multi_cased_L-12_H-768_A-12.zip'), 'bert_large_multilingual_uncased': ('https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12.zip'), 'bert_base_chinese': ('https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12.zip') } # SHA256sum verification for file download integrity (and checking for changes from the download source over time) self.bert_base_uncased_sha = { 'bert_config.json': '7b4e5f53efbd058c67cda0aacfafb340113ea1b5797d9ce6ee411704ba21fcbc', 'bert_model.ckpt.data-00000-of-00001': '58580dc5e0bf0ae0d2efd51d0e8272b2f808857f0a43a88aaf7549da6d7a8a84', 'bert_model.ckpt.index': '04c1323086e2f1c5b7c0759d8d3e484afbb0ab45f51793daab9f647113a0117b', 'bert_model.ckpt.meta': 'dd5682170a10c3ea0280c2e9b9a45fee894eb62da649bbdea37b38b0ded5f60e', 'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3', } self.bert_large_uncased_sha = { 'bert_config.json': 'bfa42236d269e2aeb3a6d30412a33d15dbe8ea597e2b01dc9518c63cc6efafcb', 'bert_model.ckpt.data-00000-of-00001': 'bc6b3363e3be458c99ecf64b7f472d2b7c67534fd8f564c0556a678f90f4eea1', 'bert_model.ckpt.index': '68b52f2205ffc64dc627d1120cf399c1ef1cbc35ea5021d1afc889ffe2ce2093', 'bert_model.ckpt.meta': '6fcce8ff7628f229a885a593625e3d5ff9687542d5ef128d9beb1b0c05edc4a1', 'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3', } self.bert_base_cased_sha = { 'bert_config.json': 'f11dfb757bea16339a33e1bf327b0aade6e57fd9c29dc6b84f7ddb20682f48bc', 'bert_model.ckpt.data-00000-of-00001': '734d5a1b68bf98d4e9cb6b6692725d00842a1937af73902e51776905d8f760ea', 'bert_model.ckpt.index': '517d6ef5c41fc2ca1f595276d6fccf5521810d57f5a74e32616151557790f7b1', 'bert_model.ckpt.meta': '5f8a9771ff25dadd61582abb4e3a748215a10a6b55947cbb66d0f0ba1694be98', 'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02', } self.bert_large_cased_sha = { 'bert_config.json': 
'7adb2125c8225da495656c982fd1c5f64ba8f20ad020838571a3f8a954c2df57', 'bert_model.ckpt.data-00000-of-00001': '6ff33640f40d472f7a16af0c17b1179ca9dcc0373155fb05335b6a4dd1657ef0', 'bert_model.ckpt.index': 'ef42a53f577fbe07381f4161b13c7cab4f4fc3b167cec6a9ae382c53d18049cf', 'bert_model.ckpt.meta': 'd2ddff3ed33b80091eac95171e94149736ea74eb645e575d942ec4a5e01a40a1', 'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02', } self.bert_base_multilingual_cased_sha = { 'bert_config.json': 'e76c3964bc14a8bb37a5530cdc802699d2f4a6fddfab0611e153aa2528f234f0', 'bert_model.ckpt.data-00000-of-00001': '55b8a2df41f69c60c5180e50a7c31b7cdf6238909390c4ddf05fbc0d37aa1ac5', 'bert_model.ckpt.index': '7d8509c2a62b4e300feb55f8e5f1eef41638f4998dd4d887736f42d4f6a34b37', 'bert_model.ckpt.meta': '95e5f1997e8831f1c31e5cf530f1a2e99f121e9cd20887f2dce6fe9e3343e3fa', 'vocab.txt': 'fe0fda7c425b48c516fc8f160d594c8022a0808447475c1a7c6d6479763f310c', } self.bert_large_multilingual_uncased_sha = { 'bert_config.json': '49063bb061390211d2fdd108cada1ed86faa5f90b80c8f6fdddf406afa4c4624', 'bert_model.ckpt.data-00000-of-00001': '3cd83912ebeb0efe2abf35c9f1d5a515d8e80295e61c49b75c8853f756658429', 'bert_model.ckpt.index': '87c372c1a3b1dc7effaaa9103c80a81b3cbab04c7933ced224eec3b8ad2cc8e7', 'bert_model.ckpt.meta': '27f504f34f02acaa6b0f60d65195ec3e3f9505ac14601c6a32b421d0c8413a29', 'vocab.txt': '87b44292b452f6c05afa49b2e488e7eedf79ea4f4c39db6f2f4b37764228ef3f', } self.bert_base_chinese_sha = { 'bert_config.json': '7aaad0335058e2640bcb2c2e9a932b1cd9da200c46ea7b8957d54431f201c015', 'bert_model.ckpt.data-00000-of-00001': '756699356b78ad0ef1ca9ba6528297bcb3dd1aef5feadd31f4775d7c7fc989ba', 'bert_model.ckpt.index': '46315546e05ce62327b3e2cd1bed22836adcb2ff29735ec87721396edb21b82e', 'bert_model.ckpt.meta': 'c0f8d51e1ab986604bc2b25d6ec0af7fd21ff94cf67081996ec3f3bf5d823047', 'vocab.txt': '45bbac6b341c319adc98a532532882e91a9cefc0329aa57bac9ae761c27b291c', } # Relate SHA to urls for loop below self.model_sha = { 'bert_base_uncased': self.bert_base_uncased_sha, 'bert_large_uncased': self.bert_large_uncased_sha, 'bert_base_cased': self.bert_base_cased_sha, 'bert_large_cased': self.bert_large_cased_sha, 'bert_base_multilingual_cased': self.bert_base_multilingual_cased_sha, 'bert_large_multilingual_uncased': self.bert_large_multilingual_uncased_sha, 'bert_base_chinese': self.bert_base_chinese_sha } # Helper to get sha256sum of a file def sha256sum(self, filename): h = hashlib.sha256() b = bytearray(128*1024) mv = memoryview(b) with open(filename, 'rb', buffering=0) as f: for n in iter(lambda : f.readinto(mv), 0): h.update(mv[:n]) return h.hexdigest() def download(self): # Iterate over urls: download, unzip, verify sha256sum found_mismatch_sha = False for model in self.model_urls: url = self.model_urls[model][0] file = self.save_path + '/' + self.model_urls[model][1] print('Downloading', url) response = urllib.request.urlopen(url) with open(file, 'wb') as handle: handle.write(response.read()) print('Unzipping', file) zip = zipfile.ZipFile(file, 'r') zip.extractall(self.save_path) zip.close() sha_dict = self.model_sha[model] for extracted_file in sha_dict: sha = sha_dict[extracted_file] if sha != self.sha256sum(file[:-4] + '/' + extracted_file): found_mismatch_sha = True print('SHA256sum does not match on file:', extracted_file, 'from download url:', url) else: print(file[:-4] + '/' + extracted_file, '\t', 'verified') if not found_mismatch_sha: print("All downloads pass sha256sum verification.") def serialize(self): pass def 
deserialize(self): pass def listAvailableWeights(self): print("Available Weight Datasets") for item in self.model_urls: print(item) def listLocallyStoredWeights(self): pass
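A hedged usage sketch for the downloader class above; the save path is a placeholder:

```python
downloader = GooglePretrainedWeightDownloader("/workspace/bert/data")
downloader.listAvailableWeights()
downloader.download()  # downloads, unzips, and sha256-verifies every listed checkpoint
```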
TensorFlow/LanguageModeling/BERT/data
data
NVIDIAPretrainedWeightDownloader
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os class NVIDIAPretrainedWeightDownloader: def __init__(self, save_path): self.save_path = save_path + '/nvidia_pretrained_weights' if not os.path.exists(self.save_path): os.makedirs(self.save_path) pass def download(self): assert False, 'NVIDIAPretrainedWeightDownloader not implemented yet.'
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine
engine
inference
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import datetime import logging import time import os import torch from tqdm import tqdm from maskrcnn_benchmark.data.datasets.evaluation import evaluate from ..utils.comm import is_main_process, all_gather, synchronize, synchronized_timestamp def compute_on_dataset(model, data_loader, device, steps=-1): model.eval() results_dict = {} latency = [] cpu_device = torch.device("cpu") for i, batch in enumerate(tqdm(data_loader)): #Break earlier for inference on partial dataset if steps > -1 and i >= steps: break images, targets, image_ids = batch images = images.to(device) with torch.no_grad(): batch_start = time.perf_counter() output = model(images) latency.append(time.perf_counter() - batch_start) output = [o.to(cpu_device) for o in output] results_dict.update( {img_id: result for img_id, result in zip(image_ids, output)} ) return results_dict, latency def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu): all_predictions = all_gather(predictions_per_gpu) if not is_main_process(): return # merge the list of dicts predictions = {} for p in all_predictions: predictions.update(p) # convert a dict where the key is the index in a list image_ids = list(sorted(predictions.keys())) if len(image_ids) != image_ids[-1] + 1: logger = logging.getLogger("maskrcnn_benchmark.inference") logger.warning( "Number of images that were gathered from multiple processes is not " "a contiguous set. Some images might be missing from the evaluation" ) # convert to a list predictions = [predictions[i] for i in image_ids] return predictions def inference( model, data_loader, dataset_name, iou_types=("bbox",), box_only=False, device="cuda", expected_results=(), expected_results_sigma_tol=4, output_folder=None, skip_eval=False, dllogger=None, steps=-1, profile=False, ): # convert to a torch.device for efficiency device = torch.device(device) num_devices = ( torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1 ) dataset = data_loader.dataset dllogger.log(step="PARAMETER", data={"eval_dataset_name": dataset_name, "eval_num_samples":len(dataset)}) start_time = synchronized_timestamp() with torch.autograd.profiler.emit_nvtx(enabled=profile): predictions, latency = compute_on_dataset(model, data_loader, device, steps=steps) # wait for all processes to complete before measuring the time synchronize() total_time = time.time() - start_time latency_avg = sum(latency) / len(latency) latency.sort() def _latency_avg(n): return sum(latency[:n]) / n latency_90 = _latency_avg(int(len(latency)*0.9)) latency_95 = _latency_avg(int(len(latency)*0.95)) latency_99 = _latency_avg(int(len(latency)*0.99)) len_dataset = len(dataset) if steps is -1 else steps total_time_str = str(datetime.timedelta(seconds=total_time)) dllogger.log(step=tuple(), data={"e2e_infer_time": total_time, "inference_perf_fps": len_dataset / total_time}) stats = {'latency_avg' : latency_avg, 'latency_90': latency_90, 'latency_95' : latency_95, 'latency_99': latency_99,} dllogger.log(step=tuple(), data=stats) logger = logging.getLogger("maskrcnn_benchmark.inference") logger.info( "Total inference time: {} ({} s / img per device, on {} devices)".format( total_time_str, total_time * num_devices / len_dataset, num_devices ) ) predictions = _accumulate_predictions_from_multiple_gpus(predictions) if not is_main_process(): return if output_folder: torch.save(predictions, os.path.join(output_folder, 
"predictions.pth")) if skip_eval: dllogger.log(step="PARAMETER", data={"skip_eval":True, "predictions_saved_path":os.path.join(output_folder, "predictions.pth")}) return extra_args = dict( box_only=box_only, iou_types=iou_types, expected_results=expected_results, expected_results_sigma_tol=expected_results_sigma_tol, ) return evaluate(dataset=dataset, predictions=predictions, output_folder=output_folder, **extra_args)
TensorFlow/LanguageModeling/BERT/triton
triton
run_squad_triton_client
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import modeling import tokenization import tritongrpcclient from utils.create_squad_data import * import grpc from run_squad import write_predictions, get_predictions, RawResult import numpy as np import tqdm from functools import partial import sys if sys.version_info >= (3, 0): import queue else: import Queue as queue flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 384, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_integer( "doc_stride", 128, "When splitting up a long document into chunks, how much stride to " "take between chunks.") flags.DEFINE_integer( "max_query_length", 64, "The maximum number of tokens for the question. Questions longer than " "this will be truncated to this length.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predictions.") flags.DEFINE_integer( "n_best_size", 20, "The total number of n-best predictions to generate in the " "nbest_predictions.json output file.") flags.DEFINE_integer( "max_answer_length", 30, "The maximum length of an answer that can be generated. This is needed " "because the start and end predictions are not conditioned on one another.") flags.DEFINE_bool( "version_2_with_negative", False, "If true, the SQuAD examples contain some that do not have an answer.") flags.DEFINE_bool( "verbose_logging", False, "If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation.") flags.DEFINE_bool( "trt_engine", False, "If true, expects a trt engine defined input/output") # Triton Specific flags flags.DEFINE_string("triton_model_name", "bert", "exports to appropriate directory for Triton") flags.DEFINE_integer("triton_model_version", 1, "exports to appropriate directory for Triton") flags.DEFINE_string("triton_server_url", "localhost:8001", "exports to appropriate directory for Triton") # Input Text for Inference flags.DEFINE_string("question", None, "Question for Inference") flags.DEFINE_string("context", None, "Context for Inference") flags.DEFINE_string( "predict_file", None, "SQuAD json for predictions. 
E.g., dev-v1.1.json or test-v1.1.json") # Set this to either 'label_ids' for Google bert or 'unique_ids' for JoC label_id_key = "unique_ids" # User defined class to store infer_ctx and request id # from callback function and let main thread to handle them class UserData: def __init__(self): self._completed_requests = queue.Queue() # Callback function used for async_run(), it can capture # additional information using functools.partial as long as the last # two arguments are reserved for InferContext and request id def completion_callback(user_data, idx, start_time, inputs, result, error): user_data._completed_requests.put((result, error, idx, start_time, inputs)) def batch(iterable, n=1): l = len(iterable) for ndx in range(0, l, n): label_ids_data = () input_ids_data = () input_mask_data = () segment_ids_data = () for i in range(0, min(n, l-ndx)): label_ids_data = label_ids_data + (np.array([iterable[ndx + i].unique_id], dtype=np.int32),) input_ids_data = input_ids_data+ (np.array(iterable[ndx + i].input_ids, dtype=np.int32),) input_mask_data = input_mask_data+ (np.array(iterable[ndx + i].input_mask, dtype=np.int32),) segment_ids_data = segment_ids_data+ (np.array(iterable[ndx + i].segment_ids, dtype=np.int32),) if FLAGS.trt_engine and len(label_ids_data) != n: #TRT needs exact batch size. Pad as necessary pad_size = n - len(label_ids_data) label_ids_data = label_ids_data + ((np.array([0], dtype=np.int32),) * pad_size) input_ids_data = input_ids_data + ((np.zeros(FLAGS.max_seq_length, dtype=np.int32),) * pad_size) input_mask_data = input_mask_data + ((np.zeros(FLAGS.max_seq_length, dtype=np.int32),) * pad_size) segment_ids_data = segment_ids_data + ((np.zeros(FLAGS.max_seq_length, dtype=np.int32),) * pad_size) inputs_dict = {label_id_key: label_ids_data, 'input_ids': input_ids_data, 'input_mask': input_mask_data, 'segment_ids': segment_ids_data} yield inputs_dict def main(_): """ Ask a question of context on Triton. 
:param context: str :param question: str :param question_id: int :return: """ os.environ["TF_XLA_FLAGS"] = "--tf_xla_enable_lazy_compilation=false" #causes memory fragmentation for bert leading to OOM tf.compat.v1.logging.info("***** Configuaration *****") for key in FLAGS.__flags.keys(): tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key))) tf.compat.v1.logging.info("**************************") tokenizer = tokenization.FullTokenizer(vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) # Get the Data if FLAGS.question and FLAGS.context: input_data = [{"paragraphs":[{"context":FLAGS.context, "qas":[{"id":0, "question":FLAGS.question}]}]}] eval_examples = read_squad_examples(input_file=None, is_training=False, version_2_with_negative=FLAGS.version_2_with_negative, input_data=input_data) elif FLAGS.predict_file: eval_examples = read_squad_examples( input_file=FLAGS.predict_file, is_training=False, version_2_with_negative=FLAGS.version_2_with_negative) else: raise ValueError("Either predict_file or question+answer need to defined") # Get Eval Features = Preprocessing eval_features = [] def append_feature(feature): eval_features.append(feature) convert_examples_to_features( examples=eval_examples, tokenizer=tokenizer, max_seq_length=FLAGS.max_seq_length, doc_stride=FLAGS.doc_stride, max_query_length=FLAGS.max_query_length, is_training=False, output_fn=append_feature) protocol_str = 'grpc' # http or grpc url = FLAGS.triton_server_url verbose = False model_name = FLAGS.triton_model_name model_version = str(FLAGS.triton_model_version) batch_size = FLAGS.predict_batch_size triton_client = tritongrpcclient.InferenceServerClient(url, verbose) model_metadata = triton_client.get_model_metadata( model_name=model_name, model_version=model_version) model_config = triton_client.get_model_config( model_name=model_name, model_version=model_version) user_data = UserData() max_outstanding = 20 # Number of outstanding requests outstanding = 0 sent_prog = tqdm.tqdm(desc="Send Requests", total=len(eval_features)) recv_prog = tqdm.tqdm(desc="Recv Requests", total=len(eval_features)) def process_outstanding(do_wait, outstanding): if (outstanding == 0 or do_wait is False): return outstanding # Wait for deferred items from callback functions (result, error, idx, start_time, inputs) = user_data._completed_requests.get() if (result is None): return outstanding stop = time.time() if (error is not None): raise ValueError("Context returned null for async id marked as done") outstanding -= 1 time_list.append(stop - start_time) batch_count = len(inputs[label_id_key]) if FLAGS.trt_engine: cls_squad_logits = result.as_numpy("cls_squad_logits") try: #when batch size > 1 start_logits_results = np.array(cls_squad_logits.squeeze()[:, :, 0]) end_logits_results = np.array(cls_squad_logits.squeeze()[:, :, 1]) except: start_logits_results = np.expand_dims(np.array(cls_squad_logits.squeeze()[:, 0]), axis=0) end_logits_results = np.expand_dims(np.array(cls_squad_logits.squeeze()[:, 1]), axis=0) else: start_logits_results = result.as_numpy("start_logits") end_logits_results = result.as_numpy("end_logits") for i in range(batch_count): unique_id = int(inputs[label_id_key][i][0]) start_logits = [float(x) for x in start_logits_results[i].flat] end_logits = [float(x) for x in end_logits_results[i].flat] all_results.append( RawResult( unique_id=unique_id, start_logits=start_logits, end_logits=end_logits)) recv_prog.update(n=batch_count) return outstanding all_results = [] time_list = [] print("Starting Sending 
Requests....\n") all_results_start = time.time() idx = 0 for inputs_dict in batch(eval_features, batch_size): present_batch_size = len(inputs_dict[label_id_key]) if not FLAGS.trt_engine: label_ids_data = np.stack(inputs_dict[label_id_key]) input_ids_data = np.stack(inputs_dict['input_ids']) input_mask_data = np.stack(inputs_dict['input_mask']) segment_ids_data = np.stack(inputs_dict['segment_ids']) inputs = [] inputs.append(tritongrpcclient.InferInput('input_ids', input_ids_data.shape, "INT32")) inputs[0].set_data_from_numpy(input_ids_data) inputs.append(tritongrpcclient.InferInput('input_mask', input_mask_data.shape, "INT32")) inputs[1].set_data_from_numpy(input_mask_data) inputs.append(tritongrpcclient.InferInput('segment_ids', segment_ids_data.shape, "INT32")) inputs[2].set_data_from_numpy(segment_ids_data) if not FLAGS.trt_engine: inputs.append(tritongrpcclient.InferInput(label_id_key, label_ids_data.shape, "INT32")) inputs[3].set_data_from_numpy(label_ids_data) outputs = [] if FLAGS.trt_engine: outputs.append(tritongrpcclient.InferRequestedOutput('cls_squad_logits')) else: outputs.append(tritongrpcclient.InferRequestedOutput('start_logits')) outputs.append(tritongrpcclient.InferRequestedOutput('end_logits')) start_time = time.time() triton_client.async_infer( model_name, inputs, partial(completion_callback, user_data, idx, start_time, inputs_dict), request_id=str(idx), model_version=model_version, outputs=outputs) outstanding += 1 idx += 1 sent_prog.update(n=present_batch_size) # Try to process at least one response per request outstanding = process_outstanding(outstanding >= max_outstanding, outstanding) tqdm.tqdm.write("All Requests Sent! Waiting for responses. Outstanding: {}.\n".format(outstanding)) # Now process all outstanding requests while (outstanding > 0): outstanding = process_outstanding(True, outstanding) all_results_end = time.time() all_results_total = (all_results_end - all_results_start) * 1000.0 print("-----------------------------") print("Total Time: {} ms".format(all_results_total)) print("-----------------------------") print("-----------------------------") print("Total Inference Time = %0.2f for" "Sentences processed = %d" % (sum(time_list), len(eval_features))) print("Throughput Average (sentences/sec) = %0.2f" % (len(eval_features) / all_results_total * 1000.0)) print("-----------------------------") if FLAGS.output_dir and FLAGS.predict_file: # When inferencing on a dataset, get inference statistics and write results to json file time_list.sort() avg = np.mean(time_list) cf_95 = max(time_list[:int(len(time_list) * 0.95)]) cf_99 = max(time_list[:int(len(time_list) * 0.99)]) cf_100 = max(time_list[:int(len(time_list) * 1)]) print("-----------------------------") print("Summary Statistics") print("Batch size =", FLAGS.predict_batch_size) print("Sequence Length =", FLAGS.max_seq_length) print("Latency Confidence Level 95 (ms) =", cf_95 * 1000) print("Latency Confidence Level 99 (ms) =", cf_99 * 1000) print("Latency Confidence Level 100 (ms) =", cf_100 * 1000) print("Latency Average (ms) =", avg * 1000) print("-----------------------------") output_prediction_file = os.path.join(FLAGS.output_dir, "predictions.json") output_nbest_file = os.path.join(FLAGS.output_dir, "nbest_predictions.json") output_null_log_odds_file = os.path.join(FLAGS.output_dir, "null_odds.json") write_predictions(eval_examples, eval_features, all_results, FLAGS.n_best_size, FLAGS.max_answer_length, FLAGS.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, 
FLAGS.version_2_with_negative, FLAGS.verbose_logging) else: # When inferencing on a single example, write best answer to stdout all_predictions, all_nbest_json, scores_diff_json = get_predictions( eval_examples, eval_features, all_results, FLAGS.n_best_size, FLAGS.max_answer_length, FLAGS.do_lower_case, FLAGS.version_2_with_negative, FLAGS.verbose_logging) print("Context is: %s \n\nQuestion is: %s \n\nPredicted Answer is: %s" %(FLAGS.context, FLAGS.question, all_predictions[0])) if __name__ == "__main__": flags.mark_flag_as_required("vocab_file") tf.compat.v1.app.run()
PyTorch/Classification/GPUNet/models
models
gpunet_modules
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2019-2022 Ross Wightman # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import List, Tuple import torch import torch.nn as nn from timm.models.layers import create_act_layer from torch.nn import functional as F # Calculate symmetric padding for a convolution def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding # Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution def get_same_padding(x: int, k: int, s: int, d: int): return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0) # Can SAME padding for given args be done statically? 
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 # Dynamically pad input x with 'SAME' padding for conv with specified args def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): ih, iw = x.size()[-2:] pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding( iw, k[1], s[1], d[1] ) if pad_h > 0 or pad_w > 0: x = F.pad( x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value, ) return x def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: dynamic = False if isinstance(padding, str): # for any string padding, the padding will be calculated for you, one of three ways padding = padding.lower() if padding == "same": # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact if is_static_pad(kernel_size, **kwargs): # static case, no extra overhead padding = get_padding(kernel_size, **kwargs) else: # dynamic 'SAME' padding, has runtime/GPU memory overhead padding = 0 dynamic = True elif padding == "valid": # 'VALID' padding, same as padding=0 padding = 0 else: # Default to PyTorch style 'same'-ish symmetric padding padding = get_padding(kernel_size, **kwargs) return padding, dynamic def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): padding = kwargs.pop("padding", "") kwargs.setdefault("bias", False) padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): """Select a 2d convolution implementation based on arguments Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. Used extensively by EfficientNet, MobileNetv3 and related networks. """ if isinstance(kernel_size, list): raise NotImplementedError else: depthwise = kwargs.pop("depthwise", False) # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0 groups = in_channels if depthwise else kwargs.pop("groups", 1) if "num_experts" in kwargs and kwargs["num_experts"] > 0: raise NotImplementedError else: m = create_conv2d_pad( in_channels, out_channels, kernel_size, groups=groups, **kwargs ) return m def get_act(actType: str = ""): if actType == "swish": return nn.SiLU elif actType == "relu": return nn.ReLU else: raise NotImplementedError def make_divisible(v, divisor=8, min_value=None, round_limit=0.9): min_value = min_value or divisor new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) # Make sure that round down does not go down by more than 10%. if new_v < round_limit * v: new_v += divisor return new_v def drop_path(x, drop_prob: float = 0.0, training: bool = False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. 
""" if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * ( x.ndim - 1 ) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) random_tensor.floor_() # binarize output = x.div(keep_prob) * random_tensor return output class SqueezeExcite(nn.Module): """Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family Args: in_chs (int): input channels to layer rd_ratio (float): ratio of squeeze reduction act_layer (nn.Module): activation layer of containing block gate_layer (Callable): attention gate function force_act_layer (nn.Module): override block's activation fn if this is set/bound rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs """ def __init__( self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU, gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None, ): super(SqueezeExcite, self).__init__() if rd_channels is None: rd_round_fn = rd_round_fn or round rd_channels = rd_round_fn(in_chs * rd_ratio) act_layer = force_act_layer or act_layer self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True) self.act1 = create_act_layer(act_layer, inplace=True) self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True) self.gate = create_act_layer(gate_layer) def forward(self, x): x_se = x.mean((2, 3), keepdim=True) x_se = self.conv_reduce(x_se) x_se = self.act1(x_se) x_se = self.conv_expand(x_se) return x * self.gate(x_se) class ConvBnAct(nn.Module): """Conv + Norm Layer + Activation w/ optional skip connection""" def __init__( self, in_chs, out_chs, kernel_size, stride=1, dilation=1, pad_type="", skip=False, act_layer="relu", norm_layer=nn.BatchNorm2d, drop_path_rate=0.0, ): super(ConvBnAct, self).__init__() self.has_residual = skip and stride == 1 and in_chs == out_chs self.drop_path_rate = drop_path_rate self.conv = create_conv2d( in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, padding=pad_type, ) self.bn1 = norm_layer(out_chs, eps=0.001) self.act1 = get_act(act_layer)(inplace=True) # for representation. self.in_channels = in_chs self.out_channels = out_chs self.kernel_size = kernel_size self.stride = stride self.act_layer = act_layer def feature_info(self, location): if location == "expansion": # output of conv after act, same as block coutput info = dict( module="act1", hook_type="forward", num_chs=self.conv.out_channels ) else: info = dict(module="", hook_type="", num_chs=self.conv.out_channels) return info def __repr__(self): name = "conv_k{}_i{}_o{}_s{}_{}".format( self.kernel_size, self.in_channels, self.out_channels, self.stride, self.act_layer, ) return name def forward(self, x): shortcut = x x = self.conv(x) x = self.bn1(x) x = self.act1(x) if self.has_residual: if self.drop_path_rate > 0.0: x = drop_path(x, self.drop_path_rate, self.training) x += shortcut return x class DepthwiseSeparableConv(nn.Module): """DepthwiseSeparable block Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion (factor of 1.0). This is an alternative to having a IR with an optional first pw conv. 
""" def __init__( self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type="", noskip=False, pw_kernel_size=1, pw_act=False, act_layer="relu", norm_layer=nn.BatchNorm2d, se_layer=None, drop_path_rate=0.0, ): super(DepthwiseSeparableConv, self).__init__() self.has_residual = (stride == 1 and in_chs == out_chs) and not noskip self.has_pw_act = pw_act # activation after point-wise conv self.drop_path_rate = drop_path_rate self.conv_dw = create_conv2d( in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True, ) self.bn1 = norm_layer(in_chs, eps=0.001) self.act1 = get_act(act_layer)(inplace=True) # Squeeze-and-excitation self.se = ( se_layer(in_chs, act_layer=get_act(act_layer)) if se_layer else nn.Identity() ) self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type) self.bn2 = norm_layer(out_chs, eps=0.001) self.act2 = act_layer(inplace=True) if self.has_pw_act else nn.Identity() def feature_info(self, location): if location == "expansion": # after SE, input to PW info = dict( module="conv_pw", hook_type="forward_pre", num_chs=self.conv_pw.in_channels, ) else: # location == 'bottleneck', block output info = dict(module="", hook_type="", num_chs=self.conv_pw.out_channels) return info def forward(self, x): shortcut = x x = self.conv_dw(x) x = self.bn1(x) x = self.act1(x) x = self.se(x) x = self.conv_pw(x) x = self.bn2(x) x = self.act2(x) if self.has_residual: if self.drop_path_rate > 0.0: x = drop_path(x, self.drop_path_rate, self.training) x += shortcut return x class InvertedResidual(nn.Module): """Inverted residual block w/ optional SE Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often referred to as 'MBConv' for (Mobile inverted bottleneck conv) and is also used in * MNasNet - https://arxiv.org/abs/1807.11626 * EfficientNet - https://arxiv.org/abs/1905.11946 * MobileNet-V3 - https://arxiv.org/abs/1905.02244 """ def __init__( self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, pad_type="", noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer="relu", norm_layer=nn.BatchNorm2d, use_se=None, se_ratio=0.25, conv_kwargs=None, drop_path_rate=0.0, ): super(InvertedResidual, self).__init__() conv_kwargs = conv_kwargs or {} mid_chs = make_divisible(in_chs * exp_ratio) self.has_residual = (in_chs == out_chs and stride == 1) and not noskip self.drop_path_rate = drop_path_rate # Point-wise expansion self.conv_pw = create_conv2d( in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs ) self.bn1 = norm_layer(mid_chs, eps=0.001) self.act1 = get_act(act_layer)(inplace=True) # Depth-wise convolution self.conv_dw = create_conv2d( mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, depthwise=True, **conv_kwargs ) self.bn2 = norm_layer(mid_chs, eps=0.001) self.act2 = get_act(act_layer)(inplace=True) # Squeeze-and-excitation self.use_se = use_se if use_se: rd_ratio = se_ratio / exp_ratio self.se = SqueezeExcite( mid_chs, act_layer=get_act(act_layer), rd_ratio=rd_ratio ) else: self.se = nn.Identity() # Point-wise linear projection self.conv_pwl = create_conv2d( mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs ) self.bn3 = norm_layer(out_chs, eps=0.001) # For representation self.in_channels = in_chs self.out_channels = out_chs self.kernel_size = dw_kernel_size self.expansion = exp_ratio self.stride = stride self.act_layer = act_layer def feature_info(self, location): if location == "expansion": # after 
SE, input to PWL info = dict( module="conv_pwl", hook_type="forward_pre", num_chs=self.conv_pwl.in_channels, ) else: # location == 'bottleneck', block output info = dict(module="", hook_type="", num_chs=self.conv_pwl.out_channels) return info def __repr__(self): name = "irb_k{}_e{}_i{}_o{}_s{}_{}_se_{}".format( self.kernel_size, self.expansion, self.in_channels, self.out_channels, self.stride, self.act_layer, self.use_se, ) return name def forward(self, x): shortcut = x # Point-wise expansion x = self.conv_pw(x) x = self.bn1(x) x = self.act1(x) # Depth-wise convolution x = self.conv_dw(x) x = self.bn2(x) x = self.act2(x) # Squeeze-and-excitation x = self.se(x) # Point-wise linear projection x = self.conv_pwl(x) x = self.bn3(x) if self.has_residual: if self.drop_path_rate > 0.0: x = drop_path(x, self.drop_path_rate, self.training) x += shortcut return x class EdgeResidual(nn.Module): """Residual block with expansion convolution followed by pointwise-linear w/ stride Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML` - https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers * MobileDet - https://arxiv.org/abs/2004.14525 * EfficientNet-X - https://arxiv.org/abs/2102.05610 * EfficientNet-V2 - https://arxiv.org/abs/2104.00298 """ def __init__( self, in_chs, out_chs, exp_kernel_size=3, stride=1, dilation=1, pad_type="", force_in_chs=0, noskip=False, exp_ratio=1.0, pw_kernel_size=1, act_layer="relu", norm_layer=nn.BatchNorm2d, use_se=False, se_ratio=0.25, drop_path_rate=0.0, ): super(EdgeResidual, self).__init__() if force_in_chs > 0: mid_chs = make_divisible(force_in_chs * exp_ratio) else: mid_chs = make_divisible(in_chs * exp_ratio) self.has_residual = (in_chs == out_chs and stride == 1) and not noskip self.drop_path_rate = drop_path_rate # Expansion convolution self.conv_exp = create_conv2d( in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, padding=pad_type, ) self.bn1 = norm_layer(mid_chs, eps=0.001) self.act1 = get_act(act_layer)(inplace=True) # Squeeze-and-excitation self.use_se = use_se if use_se: rd_ratio = se_ratio / exp_ratio self.se = SqueezeExcite( mid_chs, act_layer=get_act(act_layer), rd_ratio=rd_ratio ) else: self.se = nn.Identity() # Point-wise linear projection self.conv_pwl = create_conv2d( mid_chs, out_chs, pw_kernel_size, padding=pad_type ) self.bn2 = norm_layer(out_chs, eps=0.001) self.kernel_size = exp_kernel_size self.expansion = exp_ratio self.in_channels = in_chs self.out_channels = out_chs self.stride = stride self.act_layer = act_layer def feature_info(self, location): if location == "expansion": # after SE, before PWL info = dict( module="conv_pwl", hook_type="forward_pre", num_chs=self.conv_pwl.in_channels, ) else: # location == 'bottleneck', block output info = dict(module="", hook_type="", num_chs=self.conv_pwl.out_channels) return info def __repr__(self): name = "er_k{}_e{}_i{}_o{}_s{}_{}_se_{}".format( self.kernel_size, self.expansion, self.in_channels, self.out_channels, self.stride, self.act_layer, self.use_se, ) return name def forward(self, x): shortcut = x # Expansion convolution x = self.conv_exp(x) x = self.bn1(x) x = self.act1(x) # Squeeze-and-excitation x = self.se(x) # Point-wise linear projection x = self.conv_pwl(x) x = self.bn2(x) if self.has_residual: if self.drop_path_rate > 0.0: x = drop_path(x, self.drop_path_rate, self.training) x += shortcut return x class 
ProloguePool(nn.Module): def __init__(self, num_in_channels, num_out_channels, act_layer="relu"): super().__init__() self.num_in_channels = num_in_channels self.num_out_channels = num_out_channels self.net = nn.Sequential( nn.Conv2d( self.num_in_channels, self.num_out_channels, 3, padding=1, stride=2, bias=False, ), nn.BatchNorm2d(self.num_out_channels, eps=1e-03), get_act(act_layer)(inplace=True), nn.MaxPool2d( kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False ), ) # for representation self.num_in_channels = num_in_channels self.num_out_channels = num_out_channels self.act_layer = act_layer def __repr__(self): name = "prologue_i{}_o{}_s{}_{}".format( self.num_in_channels, self.num_out_channels, 2, self.act_layer ) return name def forward(self, x): return self.net(x) class Prologue(nn.Module): def __init__(self, num_in_channels, num_out_channels, act_layer="relu"): super().__init__() self.num_in_channels = num_in_channels self.num_out_channels = num_out_channels self.net = nn.Sequential( nn.Conv2d( self.num_in_channels, self.num_out_channels, 3, padding=1, stride=2, bias=False, ), nn.BatchNorm2d(self.num_out_channels, eps=1e-03), get_act(act_layer)(inplace=True), ) # for representation self.num_in_channels = num_in_channels self.num_out_channels = num_out_channels self.act_layer = act_layer def __repr__(self): name = "prologue_i{}_o{}_s{}_{}".format( self.num_in_channels, self.num_out_channels, 2, self.act_layer ) return name def forward(self, x): return self.net(x) class Epilogue(nn.Module): def __init__( self, num_in_channels, num_out_channels, num_classes, act_layer="relu" ): super().__init__() self.net = nn.Sequential( nn.Conv2d(num_in_channels, num_out_channels, 1, bias=False), nn.BatchNorm2d(num_out_channels, eps=1e-03), get_act(act_layer)(inplace=True), nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Dropout(p=0.2), nn.Linear(num_out_channels, num_classes), ) # for representation self.num_in_channels = num_in_channels self.num_out_channels = num_out_channels self.act_layer = act_layer def __repr__(self): name = "epilogue_i{}_o{}_s{}_{}".format( self.num_in_channels, self.num_out_channels, 1, self.act_layer ) return name def forward(self, x): x = self.net(x) return x # modules for distilled GPUNet class PrologueD(nn.Module): def __init__(self, num_in_channels, num_out_channels): super().__init__() self.num_in_channels = num_in_channels self.num_out_channels = num_out_channels self.net = nn.Sequential( nn.Conv2d( self.num_in_channels, self.num_out_channels, 3, padding=1, stride=2, bias=False, ), nn.BatchNorm2d(self.num_out_channels), nn.ReLU(), ) def __repr__(self): return "Prologue" def forward(self, x): return self.net(x) class PrologueLargeD(nn.Module): def __init__(self, num_in_channels, num_out_channels): super().__init__() self.num_in_channels = num_in_channels self.num_out_channels = num_out_channels self.net = nn.Sequential( nn.Conv2d( self.num_in_channels, self.num_out_channels, 3, padding=1, stride=2, bias=False, ), nn.BatchNorm2d(self.num_out_channels), nn.ReLU(), nn.Conv2d( self.num_out_channels, self.num_out_channels, 3, padding=1, stride=1, bias=False, ), nn.BatchNorm2d(self.num_out_channels), nn.ReLU(), nn.Conv2d( self.num_out_channels, self.num_out_channels, 3, padding=1, stride=1, bias=False, ), nn.BatchNorm2d(self.num_out_channels), nn.ReLU(), ) def __repr__(self): return "PrologueLarge" def forward(self, x): return self.net(x) class Fused_IRB(nn.Module): def __init__( self, num_in_channels: int = 1, num_out_channels: int = 1, kernel_size: int = 3, stride: 
int = 1, expansion: int = 1, groups: int = 1, ): super().__init__() self.drop_connect_rate = 0.0 self.in_channels = num_in_channels self.out_channels = num_out_channels self.kernel_size = kernel_size self.stride = stride self.expansion = expansion self.groups = groups self.body = nn.Sequential( # merge pw and dw nn.Conv2d( in_channels=self.in_channels, out_channels=self.in_channels * self.expansion, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=1, bias=False, ), nn.BatchNorm2d(self.in_channels * self.expansion, eps=0.001), nn.ReLU(), # pw nn.Conv2d( in_channels=self.in_channels * self.expansion, out_channels=self.out_channels, kernel_size=1, stride=1, groups=1, bias=False, ), nn.BatchNorm2d(self.out_channels, eps=0.001), ) if self.stride == 1 and self.in_channels == self.out_channels: self.shortcut = nn.Identity() else: self.shortcut = None def drop_connect(self, inputs, training=False, drop_connect_rate=0.0): """Apply drop connect.""" if not training: return inputs keep_prob = 1 - drop_connect_rate random_tensor = keep_prob + torch.rand( (inputs.size()[0], 1, 1, 1), dtype=inputs.dtype, device=inputs.device ) random_tensor.floor_() # binarize output = inputs.div(keep_prob) * random_tensor return output def forward(self, x): res = self.body(x) if self.shortcut is not None: if self.drop_connect_rate > 0 and self.training: res = self.drop_connect(res, self.training, self.drop_connect_rate) res = res + self.shortcut(x) return res else: return res def __repr__(self): name = "k{}_e{}_g{}_i{}_o{}_s{}".format( self.kernel_size, self.expansion, self.groups, self.in_channels, self.out_channels, self.stride, ) return name class Inverted_Residual_Block(nn.Module): def __init__( self, num_in_channels, num_out_channels, kernel_size, stride, expansion, groups ): super().__init__() self.drop_connect_rate = 0.0 self.in_channels = num_in_channels self.out_channels = num_out_channels self.kernel_size = kernel_size self.stride = stride self.expansion = expansion self.groups = groups self.body = nn.Sequential( nn.Conv2d( self.in_channels, self.in_channels * self.expansion, 1, groups=groups, bias=False, ), nn.BatchNorm2d(self.in_channels * self.expansion), nn.ReLU(), nn.Conv2d( self.in_channels * self.expansion, self.in_channels * self.expansion, kernel_size, padding=kernel_size // 2, stride=stride, groups=self.in_channels * self.expansion, bias=False, ), nn.BatchNorm2d(self.in_channels * self.expansion), nn.ReLU(), nn.Conv2d( self.in_channels * self.expansion, self.out_channels, 1, groups=groups, bias=False, ), nn.BatchNorm2d(self.out_channels), ) if self.stride == 1 and self.in_channels == self.out_channels: self.shortcut = nn.Identity() else: self.shortcut = None def drop_connect(self, inputs, training=False, drop_connect_rate=0.0): """Apply drop connect.""" if not training: return inputs keep_prob = 1 - drop_connect_rate random_tensor = keep_prob + torch.rand( (inputs.size()[0], 1, 1, 1), dtype=inputs.dtype, device=inputs.device ) random_tensor.floor_() # binarize output = inputs.div(keep_prob) * random_tensor return output def forward(self, x): res = self.body(x) if self.shortcut is not None: if self.drop_connect_rate > 0 and self.training: res = self.drop_connect(res, self.training, self.drop_connect_rate) res = res + self.shortcut(x) return res else: return res def __repr__(self): name = "k{}_e{}_g{}_i{}_o{}_s{}".format( self.kernel_size, self.expansion, self.groups, self.in_channels, self.out_channels, self.stride, ) return name class EpilogueD(nn.Module): def __init__(self, 
num_in_channels, num_out_channels, num_classes): super().__init__() self.net = nn.Sequential( nn.Conv2d(num_in_channels, 1152, 1, bias=False), nn.BatchNorm2d(1152), nn.ReLU(), nn.AdaptiveAvgPool2d(1), nn.Conv2d(1152, num_out_channels, 1, bias=False), nn.ReLU(), nn.Flatten(), nn.Dropout(p=0.2), nn.Linear(num_out_channels, num_classes), ) def __repr__(self): return "Epilogue" def forward(self, x): x = self.net(x) return x
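A minimal usage sketch of the building blocks defined above, assuming their definitions are in scope (the import path is not shown in this excerpt); the channel sizes and the input shape are illustrative only.

import torch
import torch.nn as nn

# Stack a stem, two blocks, and a classifier head from the definitions above.
net = nn.Sequential(
    Prologue(num_in_channels=3, num_out_channels=32, act_layer="relu"),     # 3x3 stride-2 stem
    ConvBnAct(in_chs=32, out_chs=32, kernel_size=3, act_layer="relu"),      # plain conv block
    InvertedResidual(in_chs=32, out_chs=64, dw_kernel_size=3, stride=2,
                     exp_ratio=4.0, act_layer="swish"),                     # MBConv-style block (SE disabled by default)
    Epilogue(num_in_channels=64, num_out_channels=1280, num_classes=1000),  # classification head
)

net.eval()
with torch.no_grad():
    logits = net(torch.randn(1, 3, 64, 64))
print(logits.shape)  # torch.Size([1, 1000])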
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph
graph
random
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Set, Tuple from syngen.generator.graph.fitter import RMATFitter from syngen.generator.graph.rmat import RMATGenerator class RandomGraph(RMATGenerator): """ Graph generator based on the Erdos-Renyi model that generates random non-partite graphs Args: seed (int): Seed to reproduce the results. If None, a random seed will be used. logdir (str): Directory to store the logging results. gpu (bool): Whether to use the GPU implementation. """ def __init__(self, seed: Optional[int] = None, logdir: str = "./logs", gpu: bool = True, **kwargs): super().__init__(seed, logdir, gpu, fitter=RMATFitter(random=True)) self.fit() def fit( self, graph: Optional[List[Tuple[int, int]]] = None, is_directed: bool = None, **kwargs, ): """ Fits the generator on the graph. For a random graph this step is graph-independent. """ self._fit_results = self.fitter.fit(graph)
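A short usage sketch for the class above, assuming the syngen package from this repository is installed; the edge list is illustrative.

from syngen.generator.graph.random import RandomGraph

# fit() is already called once in the constructor; the random fitter ignores the
# input graph, so refitting on an explicit edge list is effectively a no-op.
generator = RandomGraph(seed=42, gpu=False)
generator.fit(graph=[(0, 1), (1, 2), (2, 0)], is_directed=False)
# Edge generation itself comes from the parent RMATGenerator API, which is not shown in this excerpt.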
TensorFlow2/Detection/Efficientdet/utils
utils
model_utils
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common utils.""" import contextlib import os from typing import Text, Tuple, Union from absl import logging import numpy as np import tensorflow.compat.v1 as tf import tensorflow.compat.v2 as tf2 # pylint: disable=logging-format-interpolation def get_ema_vars(): """Get all exponential moving average (ema) variables.""" ema_vars = tf.trainable_variables() + \ tf.get_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES) for v in tf.global_variables(): # We maintain mva for batch norm moving mean and variance as well. if 'moving_mean' in v.name or 'moving_variance' in v.name: ema_vars.append(v) return list(set(ema_vars)) def get_ckpt_var_map(ckpt_path, ckpt_scope, var_scope, skip_mismatch=None): """Get a var map for restoring from pretrained checkpoints. Args: ckpt_path: string. A pretrained checkpoint path. ckpt_scope: string. Scope name for checkpoint variables. var_scope: string. Scope name for model variables. skip_mismatch: skip variables if shape mismatch. Returns: var_map: a dictionary from checkpoint name to model variables. """ logging.info('Init model from checkpoint {}'.format(ckpt_path)) if not ckpt_scope.endswith('/') or not var_scope.endswith('/'): raise ValueError('Please specific scope name ending with /') if ckpt_scope.startswith('/'): ckpt_scope = ckpt_scope[1:] if var_scope.startswith('/'): var_scope = var_scope[1:] var_map = {} # Get the list of vars to restore. model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=var_scope) reader = tf.train.load_checkpoint(ckpt_path) ckpt_var_name_to_shape = reader.get_variable_to_shape_map() ckpt_var_names = set(reader.get_variable_to_shape_map().keys()) for i, v in enumerate(model_vars): if not v.op.name.startswith(var_scope): logging.info('skip {} -- does not match scope {}'.format( v.op.name, var_scope)) ckpt_var = ckpt_scope + v.op.name[len(var_scope):] if (ckpt_var not in ckpt_var_names and v.op.name.endswith('/ExponentialMovingAverage')): ckpt_var = ckpt_scope + v.op.name[:-len('/ExponentialMovingAverage')] if ckpt_var not in ckpt_var_names: if 'Momentum' in ckpt_var or 'RMSProp' in ckpt_var: # Skip optimizer variables. continue if skip_mismatch: logging.info('skip {} ({}) -- not in ckpt'.format(v.op.name, ckpt_var)) continue raise ValueError('{} is not in ckpt {}'.format(v.op, ckpt_path)) if v.shape != ckpt_var_name_to_shape[ckpt_var]: if skip_mismatch: logging.info('skip {} ({} vs {}) -- shape mismatch'.format( v.op.name, v.shape, ckpt_var_name_to_shape[ckpt_var])) continue raise ValueError('shape mismatch {} ({} vs {})'.format( v.op.name, v.shape, ckpt_var_name_to_shape[ckpt_var])) if i < 5: # Log the first few elements for sanity check. 
logging.info('Init {} from ckpt var {}'.format(v.op.name, ckpt_var)) var_map[ckpt_var] = v return var_map def drop_connect(inputs, is_training, survival_prob): """Drop the entire conv with given survival probability.""" # "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf if not is_training: return inputs # Compute tensor. batch_size = tf.shape(inputs)[0] random_tensor = survival_prob random_tensor += tf.random.uniform([batch_size, 1, 1, 1], dtype=inputs.dtype) binary_tensor = tf.floor(random_tensor) # Unlike conventional way that multiply survival_prob at test time, here we # divide survival_prob at training time, such that no addition compute is # needed at test time. output = inputs / survival_prob * binary_tensor return output def num_params_flops(readable_format=True): """Return number of parameters and flops.""" nparams = np.sum( [np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]) options = tf.profiler.ProfileOptionBuilder.float_operation() options['output'] = 'none' flops = tf.profiler.profile( tf.get_default_graph(), options=options).total_float_ops # We use flops to denote multiply-adds, which is counted as 2 ops in tfprof. flops = flops // 2 if readable_format: nparams = float(nparams) * 1e-6 flops = float(flops) * 1e-9 return nparams, flops conv_kernel_initializer = tf.initializers.variance_scaling() dense_kernel_initializer = tf.initializers.variance_scaling() class Pair(tuple): def __new__(cls, name, value): return super().__new__(cls, (name, value)) def __init__(self, name, _): # pylint: disable=super-init-not-called self.name = name def scalar(name, tensor, is_tpu=True): """Stores a (name, Tensor) tuple in a custom collection.""" logging.info('Adding scale summary {}'.format(Pair(name, tensor))) if is_tpu: tf.add_to_collection('scalar_summaries', Pair(name, tf.reduce_mean(tensor))) else: tf.summary.scalar(name, tf.reduce_mean(tensor)) def image(name, tensor, is_tpu=True): logging.info('Adding image summary {}'.format(Pair(name, tensor))) if is_tpu: tf.add_to_collection('image_summaries', Pair(name, tensor)) else: tf.summary.image(name, tensor) def get_tpu_host_call(global_step, params): """Get TPU host call for summaries.""" scalar_summaries = tf.get_collection('scalar_summaries') if params['img_summary_steps']: image_summaries = tf.get_collection('image_summaries') else: image_summaries = [] if not scalar_summaries and not image_summaries: return None # No summaries to write. model_dir = params['model_dir'] iterations_per_loop = params.get('iterations_per_loop', 100) img_steps = params['img_summary_steps'] def host_call_fn(global_step, *args): """Training host call. Creates summaries for training metrics.""" gs = global_step[0] with tf2.summary.create_file_writer( model_dir, max_queue=iterations_per_loop).as_default(): with tf2.summary.record_if(True): for i, _ in enumerate(scalar_summaries): name = scalar_summaries[i][0] tensor = args[i][0] tf2.summary.scalar(name, tensor, step=gs) if img_steps: with tf2.summary.record_if(lambda: tf.math.equal(gs % img_steps, 0)): # Log images every 1k steps. 
for i, _ in enumerate(image_summaries): name = image_summaries[i][0] tensor = args[i + len(scalar_summaries)] tf2.summary.image(name, tensor, step=gs) return tf.summary.all_v2_summary_ops() reshaped_tensors = [tf.reshape(t, [1]) for _, t in scalar_summaries] reshaped_tensors += [t for _, t in image_summaries] global_step_t = tf.reshape(global_step, [1]) return host_call_fn, [global_step_t] + reshaped_tensors def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path): """Archive a checkpoint if the metric is better.""" ckpt_dir, ckpt_name = os.path.split(ckpt_path) saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt') saved_objective = float('-inf') if tf.io.gfile.exists(saved_objective_path): with tf.io.gfile.GFile(saved_objective_path, 'r') as f: saved_objective = float(f.read()) if saved_objective > ckpt_objective: logging.info('Ckpt {} is worse than {}'.format(ckpt_objective, saved_objective)) return False filenames = tf.io.gfile.glob(ckpt_path + '.*') if filenames is None: logging.info('No files to copy for checkpoint {}'.format(ckpt_path)) return False # clear up the backup folder. backup_dir = os.path.join(ckpt_dir, 'backup') if tf.io.gfile.exists(backup_dir): tf.io.gfile.rmtree(backup_dir) # rename the old checkpoints to backup folder. dst_dir = os.path.join(ckpt_dir, 'archive') if tf.io.gfile.exists(dst_dir): logging.info('mv {} to {}'.format(dst_dir, backup_dir)) tf.io.gfile.rename(dst_dir, backup_dir) # Write checkpoints. tf.io.gfile.makedirs(dst_dir) for f in filenames: dest = os.path.join(dst_dir, os.path.basename(f)) tf.io.gfile.copy(f, dest, overwrite=True) ckpt_state = tf.train.generate_checkpoint_state_proto( dst_dir, model_checkpoint_path=os.path.join(dst_dir, ckpt_name)) with tf.io.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f: f.write(str(ckpt_state)) with tf.io.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f: f.write('%s' % ckpt_eval) # Update the best objective. with tf.io.gfile.GFile(saved_objective_path, 'w') as f: f.write('%f' % ckpt_objective) logging.info('Copying checkpoint {} to {}'.format(ckpt_path, dst_dir)) return True def parse_image_size(image_size: Union[Text, int, Tuple[int, int]]): """Parse the image size and return (height, width). Args: image_size: A integer, a tuple (H, W), or a string with HxW format. Returns: A tuple of integer (height, width). """ if isinstance(image_size, int): # image_size is integer, with the same width and height. return (image_size, image_size) if isinstance(image_size, str): # image_size is a string with format WxH width, height = image_size.lower().split('x') return (int(height), int(width)) if isinstance(image_size, tuple): return image_size raise ValueError('image_size must be an int, WxH string, or (height, width)' 'tuple. Was %r' % image_size) def get_feat_sizes(image_size: Union[Text, int, Tuple[int, int]], max_level: int): """Get feat widths and heights for all levels. Args: image_size: A integer, a tuple (H, W), or a string with HxW format. max_level: maximum feature level. Returns: feat_sizes: a list of tuples (height, width) for each level. 
""" image_size = parse_image_size(image_size) feat_sizes = [{'height': image_size[0], 'width': image_size[1]}] feat_size = image_size for _ in range(1, max_level + 1): feat_size = ((feat_size[0] - 1) // 2 + 1, (feat_size[1] - 1) // 2 + 1) feat_sizes.append({'height': feat_size[0], 'width': feat_size[1]}) return feat_sizes def verify_feats_size(feats, feat_sizes, min_level, max_level, data_format='channels_last'): """Verify the feature map sizes.""" expected_output_size = feat_sizes[min_level:max_level + 1] for cnt, size in enumerate(expected_output_size): h_id, w_id = (2, 3) if data_format == 'channels_first' else (1, 2) if feats[cnt].shape[h_id] != size['height']: raise ValueError( 'feats[{}] has shape {} but its height should be {}.' '(input_height: {}, min_level: {}, max_level: {}.)'.format( cnt, feats[cnt].shape, size['height'], feat_sizes[0]['height'], min_level, max_level)) if feats[cnt].shape[w_id] != size['width']: raise ValueError( 'feats[{}] has shape {} but its width should be {}.' '(input_width: {}, min_level: {}, max_level: {}.)'.format( cnt, feats[cnt].shape, size['width'], feat_sizes[0]['width'], min_level, max_level)) @contextlib.contextmanager def float16_scope(): """Scope class for float16.""" def _custom_getter(getter, *args, **kwargs): """Returns a custom getter that methods must be called under.""" cast_to_float16 = False requested_dtype = kwargs['dtype'] if requested_dtype == tf.float16: kwargs['dtype'] = tf.float32 cast_to_float16 = True var = getter(*args, **kwargs) if cast_to_float16: var = tf.cast(var, tf.float16) return var with tf.variable_scope('', custom_getter=_custom_getter) as varscope: yield varscope def set_precision_policy(policy_name: Text = None, loss_scale: bool = False): """Set precision policy according to the name. Args: policy_name: precision policy name, one of 'float32', 'mixed_float16', 'mixed_bfloat16', or None. loss_scale: whether to use loss scale (only for training). """ if not policy_name: return assert policy_name in ('mixed_float16', 'mixed_bfloat16', 'float32') logging.info('use mixed precision policy name %s', policy_name) # TODO(tanmingxing): use tf.keras.layers.enable_v2_dtype_behavior() when it # available in stable TF release. from tensorflow.python.keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top,g-direct-tensorflow-import base_layer_utils.enable_v2_dtype_behavior() # mixed_float16 training is not supported for now, so disable loss_scale. # float32 and mixed_bfloat16 do not need loss scale for training. if loss_scale: policy = tf2.keras.mixed_precision.experimental.Policy(policy_name) else: policy = tf2.keras.mixed_precision.experimental.Policy( policy_name, loss_scale=None) tf2.keras.mixed_precision.experimental.set_policy(policy) def build_model_with_precision(pp, mm, ii, tt, *args, **kwargs): """Build model with its inputs/params for a specified precision context. This is highly specific to this codebase, and not intended to be general API. Advanced users only. DO NOT use it if you don't know what it does. NOTE: short argument names are intended to avoid conficts with kwargs. Args: pp: A string, precision policy name, such as "mixed_float16". mm: A function, for rmodel builder. ii: A tensor, for model inputs. tt: A bool, If true, it is for training; otherwise, it is for eval. *args: A list of model arguments. **kwargs: A dict, extra model parameters. Returns: the output of mm model. 
""" if pp == 'mixed_bfloat16': set_precision_policy(pp) inputs = tf.cast(ii, tf.bfloat16) with tf.tpu.bfloat16_scope(): outputs = mm(inputs, *args, **kwargs) set_precision_policy('float32') elif pp == 'mixed_float16': set_precision_policy(pp, loss_scale=tt) inputs = tf.cast(ii, tf.float16) with float16_scope(): outputs = mm(inputs, *args, **kwargs) set_precision_policy('float32') elif not pp or pp == 'float32': outputs = mm(ii, *args, **kwargs) else: raise ValueError('Unknow precision name {}'.format(pp)) # Users are responsible to convert the dtype of all outputs. return outputs
PyTorch/Segmentation/MaskRCNN/pytorch/configs
configs
e2e_mask_rcnn_R_50_FPN_1x_bs64
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" BACKBONE: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 RPN: USE_FPN: True ANCHOR_STRIDE: (4, 8, 16, 32, 64) PRE_NMS_TOP_N_TRAIN: 2000 PRE_NMS_TOP_N_TEST: 1000 POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TRAIN: 8000 ROI_HEADS: USE_FPN: True ROI_BOX_HEAD: POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" PREDICTOR: "FPNPredictor" ROI_MASK_HEAD: POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" PREDICTOR: "MaskRCNNC4Predictor" POOLER_RESOLUTION: 14 POOLER_SAMPLING_RATIO: 2 RESOLUTION: 28 SHARE_BOX_FEATURE_EXTRACTOR: False MASK_ON: True DATASETS: TRAIN: ("coco_2017_train",) TEST: ("coco_2017_val",) DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: BASE_LR: 0.08 WEIGHT_DECAY: 0.0001 STEPS: (18000,24000) MAX_ITER: 27500 IMS_PER_BATCH: 64 WARMUP_FACTOR: 0.0001 WARMUP_ITERS: 800 WARMUP_METHOD: "mlperf_linear" TEST: IMS_PER_BATCH: 8
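The block above is a yacs-style configuration; the sketch below shows how such a file is typically consumed in a maskrcnn_benchmark-based codebase. The file name, the import path, and the override values are assumptions, not part of the config itself.

from maskrcnn_benchmark.config import cfg  # assumes the standard maskrcnn_benchmark layout

cfg.merge_from_file("configs/e2e_mask_rcnn_R_50_FPN_1x_bs64.yaml")
# Command-line style overrides, e.g. a smaller global batch size for fewer GPUs;
# BASE_LR is typically rescaled linearly with IMS_PER_BATCH (0.08 * 16 / 64 = 0.02).
cfg.merge_from_list(["SOLVER.IMS_PER_BATCH", 16, "SOLVER.BASE_LR", 0.02])
cfg.freeze()
print(cfg.MODEL.MASK_ON, cfg.SOLVER.MAX_ITER)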
TensorFlow2/LanguageModeling/BERT/official/modeling/training
training
distributed_executor
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Custom training loop for running TensorFlow 2.0 models.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import json import os from absl import flags from absl import logging import numpy as np import tensorflow as tf # pylint: disable=unused-import,g-import-not-at-top,redefined-outer-name,reimported from typing import Optional, Dict, List, Text, Callable, Union, Iterator, Any from official.modeling.hyperparams import params_dict from official.utils.misc import tpu_lib FLAGS = flags.FLAGS def strategy_flags_dict(): """Returns TPU related flags in a dictionary.""" return { # TPUStrategy related flags. 'tpu': FLAGS.tpu, # MultiWorkerMirroredStrategy related flags. 'worker_hosts': FLAGS.worker_hosts, 'task_index': FLAGS.task_index, } def hparam_flags_dict(): """Returns model params related flags in a dictionary.""" return { 'data_dir': FLAGS.data_dir, 'model_dir': FLAGS.model_dir, 'train_batch_size': FLAGS.train_batch_size, 'eval_batch_size': FLAGS.eval_batch_size, 'precision': FLAGS.precision, 'config_file': FLAGS.config_file, 'params_override': FLAGS.params_override, } def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix): """Saves model to model_dir with provided checkpoint prefix.""" checkpoint_path = os.path.join(model_dir, checkpoint_prefix) saved_path = checkpoint.save(checkpoint_path) logging.info('Saving model as TF checkpoint: %s', saved_path) def _steps_to_run(current_step, total_steps, steps_per_loop): """Calculates steps to run on device.""" if steps_per_loop <= 0: raise ValueError('steps_per_loop should be positive integer.') return min(total_steps - current_step, steps_per_loop) def _no_metric(): return None class SummaryWriter(object): """Simple SummaryWriter for writing dictionary of metrics. Attributes: _writer: The tf.SummaryWriter. """ def __init__(self, model_dir: Text, name: Text): """Inits SummaryWriter with paths. Arguments: model_dir: the model folder path. name: the summary subfolder name. """ self._writer = tf.summary.create_file_writer(os.path.join(model_dir, name)) def __call__(self, metrics: Union[Dict[Text, float], float], step: int): """Write metrics to summary with the given writer. Args: metrics: a dictionary of metrics values. Prefer dictionary. step: integer. The training step. """ if not isinstance(metrics, dict): # Support scalar metric without name. logging.warning('Warning: summary writer prefer metrics as dictionary.') metrics = {'metric': metrics} with self._writer.as_default(): for k, v in metrics.items(): tf.summary.scalar(k, v, step=step) self._writer.flush() class DistributedExecutor(object): """Interface to train and eval models with tf.distribute.Strategy. Arguments: strategy: an instance of tf.distribute.Strategy. 
params: Model configuration needed to run distribution strategy. model_fn: Keras model function. Signature: (params: ParamsDict) -> tf.keras.models.Model. loss_fn: loss function. Signature: (y_true: Tensor, y_pred: Tensor) -> Tensor metric_fn: metric function. Signature: () -> tf.keras.metrics.Metric. is_multi_host: Set to True when using multi hosts for training, like multi worker GPU or TPU pod (slice). Otherwise, False. """ def __init__(self, strategy, params, model_fn, loss_fn, is_multi_host=False): self._params = params self._model_fn = model_fn self._loss_fn = loss_fn self._strategy = strategy self._checkpoint_name = 'ctl_step_{step}.ckpt' self._is_multi_host = is_multi_host @property def checkpoint_name(self): """Returns default checkpoint name.""" return self._checkpoint_name @checkpoint_name.setter def checkpoint_name(self, name): """Sets default summary writer for the current thread.""" self._checkpoint_name = name def loss_fn(self): return self._loss_fn() def model_fn(self, params): return self._model_fn(params) def _save_config(self, model_dir): """Save parameters to config files if model_dir is defined.""" logging.info('Save config to model_dir %s.', model_dir) if model_dir: if not tf.io.gfile.exists(model_dir): tf.io.gfile.makedirs(model_dir) self._params.lock() params_dict.save_params_dict_to_yaml(self._params, model_dir + '/params.yaml') else: logging.warning('model_dir is empty, so skip the save config.') def _get_input_iterator( self, input_fn: Callable[..., tf.data.Dataset], strategy: tf.distribute.Strategy) -> Optional[Iterator[Any]]: """Returns distributed dataset iterator. Args: input_fn: (params: dict) -> tf.data.Dataset. strategy: an instance of tf.distribute.Strategy. Returns: An iterator that yields input tensors. """ if input_fn is None: return None # When training with multiple TPU workers, datasets needs to be cloned # across workers. Since Dataset instance cannot be cloned in eager mode, # we instead pass callable that returns a dataset. if self._is_multi_host: return iter( strategy.experimental_distribute_datasets_from_function(input_fn)) else: input_data = input_fn() return iter(strategy.experimental_distribute_dataset(input_data)) def _create_replicated_step(self, strategy, model, loss_fn, optimizer, metric=None): def _replicated_step(inputs): """Replicated training step.""" inputs, labels = inputs with tf.GradientTape() as tape: outputs = model(inputs, training=True) prediction_loss = loss_fn(labels, outputs) loss = tf.reduce_mean(prediction_loss) loss = loss / strategy.num_replicas_in_sync if isinstance(metric, tf.keras.metrics.Metric): metric.update_state(labels, outputs) else: logging.error('train metric is not an instance of ' 'tf.keras.metrics.Metric.') grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) return loss return _replicated_step def _create_train_step(self, strategy, model, loss_fn, optimizer, metric=None): """Creates a distributed training step. Args: strategy: an instance of tf.distribute.Strategy. model: (Tensor, bool) -> Tensor. model function. loss_fn: (y_true: Tensor, y_pred: Tensor) -> Tensor. optimizer: tf.keras.optimizers.Optimizer. iterator: an iterator that yields input tensors. metric: tf.keras.metrics.Metric subclass. Returns: The training step callable. """ _replicated_step = self._create_replicated_step(strategy, model, loss_fn, optimizer, metric) @tf.function def train_step(iterator, num_steps): """Performs a distributed training step. 
Args: iterator: an iterator that yields input tensors. Returns: The loss tensor. """ if not isinstance(num_steps, tf.Tensor): raise ValueError('steps should be an Tensor. Python object may cause ' 'retracing.') per_replica_losses = strategy.experimental_run_v2( _replicated_step, args=(next(iterator),)) for _ in tf.range(num_steps - 1): per_replica_losses = strategy.experimental_run_v2( _replicated_step, args=(next(iterator),)) # For reporting, we returns the mean of losses. loss = strategy.reduce( tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None) return loss return train_step def _create_test_step(self, strategy, model, metric): """Creates a distributed test step.""" @tf.function def test_step(iterator): """Calculates evaluation metrics on distributed devices.""" if not metric: logging.info('Skip test_step because metric is None (%s)', metric) return None, None if not isinstance(metric, tf.keras.metrics.Metric): raise ValueError( 'Metric must be an instance of tf.keras.metrics.Metric ' 'for running in test_step. Actual {}'.format(metric)) def _test_step_fn(inputs): """Replicated accuracy calculation.""" inputs, labels = inputs model_outputs = model(inputs, training=False) metric.update_state(labels, model_outputs) return labels, model_outputs return strategy.experimental_run_v2(_test_step_fn, args=(next(iterator),)) return test_step def train(self, train_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset], eval_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset] = None, model_dir: Text = None, total_steps: int = 1, iterations_per_loop: int = 1, train_metric_fn: Callable[[], Any] = None, eval_metric_fn: Callable[[], Any] = None, summary_writer_fn: Callable[[Text, Text], SummaryWriter] = SummaryWriter, init_checkpoint: Callable[[tf.keras.Model], Any] = None, custom_callbacks: List[tf.keras.callbacks.Callback] = None, save_config: bool = True): """Runs distributed training. Args: train_input_fn: (params: dict) -> tf.data.Dataset training data input function. eval_input_fn: (Optional) same type as train_input_fn. If not None, will trigger evaluting metric on eval data. If None, will not run eval step. model_dir: the folder path for model checkpoints. total_steps: total training steps. iterations_per_loop: train steps per loop. After each loop, this job will update metrics like loss and save checkpoint. train_metric_fn: metric_fn for evaluation in train_step. eval_metric_fn: metric_fn for evaluation in test_step. summary_writer_fn: function to create summary writer. init_checkpoint: function to load checkpoint. custom_callbacks: A list of Keras Callbacks objects to run during training. More specifically, `on_batch_begin()`, `on_batch_end()`, methods are invoked during training. save_config: bool. Whether to save params to model_dir. Returns: The training loss and eval metrics. 
""" assert train_input_fn is not None if train_metric_fn and not callable(train_metric_fn): raise ValueError('if `train_metric_fn` is specified, ' 'train_metric_fn must be a callable.') if eval_metric_fn and not callable(eval_metric_fn): raise ValueError('if `eval_metric_fn` is specified, ' 'eval_metric_fn must be a callable.') train_metric_fn = train_metric_fn or _no_metric eval_metric_fn = eval_metric_fn or _no_metric if custom_callbacks and iterations_per_loop != 1: logging.error( 'It is sematically wrong to run callbacks when ' 'iterations_per_loop is not one (%s)', iterations_per_loop) def _run_callbacks_on_batch_begin(batch): """Runs custom callbacks at the start of every step.""" if not custom_callbacks: return for callback in custom_callbacks: if callback: callback.on_batch_begin(batch) def _run_callbacks_on_batch_end(batch): """Runs custom callbacks at the end of every step.""" if not custom_callbacks: return for callback in custom_callbacks: if callback: callback.on_batch_end(batch) if save_config: self._save_config(model_dir) if FLAGS.save_checkpoint_freq: save_freq = FLAGS.save_checkpoint_freq else: save_freq = iterations_per_loop params = self._params strategy = self._strategy # To reduce unnecessary send/receive input pipeline operation, we place # input pipeline ops in worker task. train_iterator = self._get_input_iterator(train_input_fn, strategy) train_loss = None eval_metric_result = None with strategy.scope(): # To correctly place the model weights on accelerators, # model and optimizer should be created in scope. model = self.model_fn(params.as_dict()) if not hasattr(model, 'optimizer'): raise ValueError('User should set optimizer attribute to model ' 'inside `model_fn`.') optimizer = model.optimizer # Training loop starts here. checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer) latest_checkpoint_file = tf.train.latest_checkpoint(model_dir) initial_step = 0 if latest_checkpoint_file: logging.info( 'Checkpoint file %s found and restoring from ' 'checkpoint', latest_checkpoint_file) checkpoint.restore(latest_checkpoint_file) initial_step = optimizer.iterations.numpy() logging.info('Loading from checkpoint file completed. Init step %d', initial_step) elif init_checkpoint: logging.info('Restoring from init checkpoint function') init_checkpoint(model) logging.info('Loading from init checkpoint file completed') current_step = optimizer.iterations.numpy() checkpoint_name = self.checkpoint_name eval_metric = eval_metric_fn() train_metric = train_metric_fn() train_summary_writer = summary_writer_fn(model_dir, 'eval_train') test_summary_writer = summary_writer_fn(model_dir, 'eval_test') # Continue training loop. 
train_step = self._create_train_step( strategy=strategy, model=model, loss_fn=self.loss_fn(), optimizer=optimizer, metric=train_metric) test_step = None if eval_input_fn and eval_metric: test_step = self._create_test_step(strategy, model, metric=eval_metric) logging.info('Training started') last_save_checkpoint_step = current_step while current_step < total_steps: num_steps = _steps_to_run(current_step, total_steps, iterations_per_loop) _run_callbacks_on_batch_begin(current_step) train_loss = train_step(train_iterator, tf.convert_to_tensor(num_steps, dtype=tf.int32)) _run_callbacks_on_batch_end(current_step) current_step += num_steps train_loss = tf.nest.map_structure(lambda x: x.numpy().astype(float), train_loss) if not isinstance(train_loss, dict): train_loss = {'total_loss': train_loss} if np.isnan(train_loss['total_loss']): raise ValueError('total loss is NaN.') if train_metric: train_metric_result = train_metric.result() if isinstance(train_metric, tf.keras.metrics.Metric): train_metric_result = tf.nest.map_structure( lambda x: x.numpy().astype(float), train_metric_result) if not isinstance(train_metric_result, dict): train_metric_result = {'metric': train_metric_result} train_metric_result.update(train_loss) else: train_metric_result = train_loss if callable(optimizer.lr): train_metric_result.update( {'learning_rate': optimizer.lr(current_step).numpy()}) else: train_metric_result.update({'learning_rate': optimizer.lr.numpy()}) logging.info('Train Step: %d/%d / loss = %s / training metric = %s', current_step, total_steps, train_loss, train_metric_result) train_summary_writer( metrics=train_metric_result, step=optimizer.iterations) # Saves model checkpoints and run validation steps at every # iterations_per_loop steps. # To avoid repeated model saving, we do not save after the last # step of training. if save_freq > 0 and current_step < total_steps and ( current_step - last_save_checkpoint_step) >= save_freq: _save_checkpoint(checkpoint, model_dir, checkpoint_name.format(step=current_step)) last_save_checkpoint_step = current_step if test_step: eval_iterator = self._get_input_iterator(eval_input_fn, strategy) eval_metric_result = self._run_evaluation(test_step, current_step, eval_metric, eval_iterator) logging.info('Step: %s evalation metric = %s.', current_step, eval_metric_result) test_summary_writer( metrics=eval_metric_result, step=optimizer.iterations) # Re-initialize evaluation metric, except the last step. if eval_metric and current_step < total_steps: eval_metric.reset_states() if train_metric and current_step < total_steps: train_metric.reset_states() # Reaches the end of training and saves the last checkpoint. 
if last_save_checkpoint_step < total_steps: _save_checkpoint(checkpoint, model_dir, checkpoint_name.format(step=current_step)) if test_step: logging.info('Running final evaluation after training is complete.') eval_iterator = self._get_input_iterator(eval_input_fn, strategy) eval_metric_result = self._run_evaluation(test_step, current_step, eval_metric, eval_iterator) logging.info('Final evaluation metric = %s.', eval_metric_result) test_summary_writer( metrics=eval_metric_result, step=optimizer.iterations) return train_loss, eval_metric_result def _run_evaluation(self, test_step, current_training_step, metric, test_iterator): """Runs validation steps and aggregate metrics.""" if not test_iterator or not metric: logging.warning( 'Both test_iterator (%s) and metrics (%s) must not be None.', test_iterator, metric) return None logging.info('Running evaluation after step: %s.', current_training_step) while True: try: test_step(test_iterator) except (StopIteration, tf.errors.OutOfRangeError): break metric_result = metric.result() if isinstance(metric, tf.keras.metrics.Metric): metric_result = metric_result.numpy().astype(float) logging.info('Step: [%d] Validation metric = %f', current_training_step, metric_result) return metric_result def evaluate_from_model_dir( self, model_dir: Text, eval_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset], eval_metric_fn: Callable[[], Any], total_steps: int = -1, eval_timeout: int = None, min_eval_interval: int = 180, summary_writer_fn: Callable[[Text, Text], SummaryWriter] = SummaryWriter): """Runs distributed evaluation on model folder. Args: eval_input_fn: (Optional) same type as train_input_fn. If not None, will trigger evaluting metric on eval data. If None, will not run eval step. eval_metric_fn: metric_fn for evaluation in test_step. model_dir: the folder for storing model checkpoints. total_steps: total training steps. If the current step reaches the total_steps, the evaluation loop will stop. eval_timeout: The maximum number of seconds to wait between checkpoints. If left as None, then the process will wait indefinitely. Used by tf.train.checkpoints_iterator. min_eval_interval: The minimum number of seconds between yielding checkpoints. Used by tf.train.checkpoints_iterator. summary_writer_fn: function to create summary writer. Returns: Eval metrics dictionary of the last checkpoint. """ if not model_dir: raise ValueError('model_dir must be set.') def terminate_eval(): tf.logging.info('Terminating eval after %d seconds of no checkpoints' % eval_timeout) return True summary_writer = summary_writer_fn(model_dir, 'eval') # Read checkpoints from the given model directory # until `eval_timeout` seconds elapses. for checkpoint_path in tf.train.checkpoints_iterator( model_dir, min_interval_secs=min_eval_interval, timeout=eval_timeout, timeout_fn=terminate_eval): eval_metric_result, current_step = self.evaluate_checkpoint( checkpoint_path=checkpoint_path, eval_input_fn=eval_input_fn, eval_metric_fn=eval_metric_fn, summary_writer=summary_writer) if total_steps > 0 and current_step >= total_steps: logging.info('Evaluation finished after training step %d', current_step) break return eval_metric_result def evaluate_checkpoint(self, checkpoint_path: Text, eval_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset], eval_metric_fn: Callable[[], Any], summary_writer: SummaryWriter = None): """Runs distributed evaluation on the one checkpoint. Args: eval_input_fn: (Optional) same type as train_input_fn. 
If not None, will trigger evaluting metric on eval data. If None, will not run eval step. eval_metric_fn: metric_fn for evaluation in test_step. checkpoint_path: the checkpoint to evaluate. summary_writer_fn: function to create summary writer. Returns: Eval metrics dictionary of the last checkpoint. """ if not callable(eval_metric_fn): raise ValueError('if `eval_metric_fn` is specified, ' 'eval_metric_fn must be a callable.') params = self._params strategy = self._strategy # To reduce unnecessary send/receive input pipeline operation, we place # input pipeline ops in worker task. with strategy.scope(): # To correctly place the model weights on accelerators, # model and optimizer should be created in scope. model = self.model_fn(params.as_dict()) checkpoint = tf.train.Checkpoint(model=model) eval_metric = eval_metric_fn() assert eval_metric, 'eval_metric does not exist' test_step = self._create_test_step(strategy, model, metric=eval_metric) logging.info('Starting to evaluate.') if not checkpoint_path: raise ValueError('checkpoint path is empty') reader = tf.compat.v1.train.NewCheckpointReader(checkpoint_path) current_step = reader.get_tensor( 'optimizer/iter/.ATTRIBUTES/VARIABLE_VALUE') logging.info( 'Checkpoint file %s found and restoring from ' 'checkpoint', checkpoint_path) checkpoint.restore(checkpoint_path) eval_iterator = self._get_input_iterator(eval_input_fn, strategy) eval_metric_result = self._run_evaluation(test_step, current_step, eval_metric, eval_iterator) logging.info('Step: %s evalation metric = %s.', current_step, eval_metric_result) summary_writer(metrics=eval_metric_result, step=current_step) eval_metric.reset_states() return eval_metric_result, current_step def predict(self): return NotImplementedError('Unimplmented function.') # TODO(yeqing): Add unit test for MultiWorkerMirroredStrategy. class ExecutorBuilder(object): """Builder of DistributedExecutor. Example 1: Builds an executor with supported Strategy. builder = ExecutorBuilder( strategy_type='tpu', strategy_config={'tpu': '/bns/xxx'}) dist_executor = builder.build_executor( params=params, model_fn=my_model_fn, loss_fn=my_loss_fn, metric_fn=my_metric_fn) Example 2: Builds an executor with customized Strategy. builder = ExecutorBuilder() builder.strategy = <some customized Strategy> dist_executor = builder.build_executor( params=params, model_fn=my_model_fn, loss_fn=my_loss_fn, metric_fn=my_metric_fn) Example 3: Builds a customized executor with customized Strategy. class MyDistributedExecutor(DistributedExecutor): # implementation ... builder = ExecutorBuilder() builder.strategy = <some customized Strategy> dist_executor = builder.build_executor( class_ctor=MyDistributedExecutor, params=params, model_fn=my_model_fn, loss_fn=my_loss_fn, metric_fn=my_metric_fn) Args: strategy_type: string. One of 'tpu', 'mirrored', 'multi_worker_mirrored'. If None. User is responsible to set the strategy before calling build_executor(...). strategy_config: necessary config for constructing the proper Strategy. Check strategy_flags_dict() for examples of the structure. """ def __init__(self, strategy_type=None, strategy_config=None): self._strategy_config = strategy_config self._strategy = self._build_strategy(strategy_type) @property def strategy(self): """Returns default checkpoint name.""" return self._strategy @strategy.setter def strategy(self, new_strategy): """Sets default summary writer for the current thread.""" self._strategy = new_strategy def _build_strategy(self, strategy_type): """Builds tf.distribute.Strategy instance. 
Args: strategy_type: string. One of 'tpu', 'one_device_gpu', 'mirrored', 'multi_worker_mirrored'. Returns: An tf.distribute.Strategy object. Returns None if strategy_type is None. """ if strategy_type is None: return None if strategy_type == 'tpu': return self._build_tpu_strategy() elif strategy_type == 'one_device_gpu': return tf.distribute.OneDeviceStrategy("device:GPU:0") elif strategy_type == 'mirrored': return self._build_mirrored_strategy() elif strategy_type == 'multi_worker_mirrored': return self._build_multiworker_mirrored_strategy() else: raise NotImplementedError('Unsupport accelerator type "%s"' % strategy_type) def _build_mirrored_strategy(self): """Builds a MirroredStrategy object.""" return tf.distribute.MirroredStrategy() def _build_tpu_strategy(self): """Builds a TPUStrategy object.""" tpu = self._strategy_config.tpu logging.info('Use TPU at %s', tpu if tpu is not None else '') cluster_resolver = tpu_lib.tpu_initialize(tpu) strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver) return strategy def _build_multiworker_mirrored_strategy(self): """Builds a MultiWorkerMirroredStrategy object.""" worker_hosts = self._strategy_config.worker_hosts if worker_hosts is not None: # Set TF_CONFIG environment variable worker_hosts = worker_hosts.split(',') task_index = self._strategy_config.task_index os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': worker_hosts }, 'task': { 'type': 'worker', 'index': task_index } }) multiworker_strategy = ( tf.distribute.experimental.MultiWorkerMirroredStrategy()) return multiworker_strategy def build_executor(self, class_ctor=DistributedExecutor, params=None, model_fn=None, loss_fn=None, **kwargs): """Creates an executor according to strategy type. See doc string of the DistributedExecutor.__init__ for more information of the input arguments. Args: class_ctor: A constructor of executor (default: DistributedExecutor). params: ParamsDict, all the model parameters and runtime parameters. model_fn: Keras model function. loss_fn: loss function. **kwargs: other arguments to the executor constructor. Returns: An instance of DistributedExecutor or its subclass. """ if self._strategy is None: raise ValueError('`strategy` should not be None. You need to specify ' '`strategy_type` in the builder contructor or directly ' 'set the `strategy` property of the builder.') return class_ctor( strategy=self._strategy, params=params, model_fn=model_fn, loss_fn=loss_fn, **kwargs)
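A wiring sketch for the classes above. The my_* callables and params are user-supplied placeholders (hypothetical); note that the metric functions are passed to train(), not to build_executor().

builder = ExecutorBuilder(strategy_type='mirrored')
executor = builder.build_executor(
    params=params,            # a ParamsDict with model and runtime settings
    model_fn=my_model_fn,     # must return a tf.keras.Model with `model.optimizer` already set
    loss_fn=my_loss_fn)       # zero-arg callable that returns the actual loss fn(y_true, y_pred)
train_loss, eval_metric = executor.train(
    train_input_fn=my_train_input_fn,
    eval_input_fn=my_eval_input_fn,
    eval_metric_fn=my_eval_metric_fn,
    model_dir='/tmp/ctl_model',
    total_steps=10000,
    iterations_per_loop=100)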
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets
datasets
ieee
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import json import logging import shutil from typing import Optional import cudf import cupy as cp import numpy as np import pandas as pd from syngen.configuration import SynGenDatasetFeatureSpec from syngen.preprocessing.base_preprocessing import BasePreprocessing from syngen.utils.types import MetaData logger = logging.getLogger(__name__) log = logger class IEEEPreprocessing(BasePreprocessing): """ preprocessing for https://www.kaggle.com/competitions/ieee-fraud-detection """ def __init__( self, source_path: str, destination_path: Optional[str] = None, download: bool = False, **kwargs, ): super().__init__(source_path, destination_path, download, **kwargs) def transform(self, gpu=False, use_cache=False): if use_cache and os.path.exists(self.destination_path): return SynGenDatasetFeatureSpec.instantiate_from_preprocessed(self.destination_path) operator = cp if gpu else np tabular_operator = cudf if gpu else pd data = tabular_operator.read_csv(os.path.join(self.source_path, 'data.csv')) data = data.fillna(0) cont_columns = [ 'TransactionDT', 'TransactionAmt', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C14', 'V279', 'V280', 'V284', 'V285', 'V286', 'V287', 'V290', 'V291', 'V292', 'V293', 'V294', 'V295', 'V297', 'V298', 'V299', 'V302', 'V303', 'V304', 'V305', 'V306', 'V307', 'V308', 'V309', 'V310', 'V311', 'V312', 'V316', 'V317', 'V318', 'V319', 'V320', 'V321', ] cat_columns = ["isFraud"] for col in ('user_id', 'product_id', *cat_columns): data[col] = data[col].astype("category").cat.codes data[col] = data[col].astype(int) structural_data = data[['user_id', 'product_id']] tabular_data = data[[*cat_columns, *cont_columns]] edge_features = self._prepare_feature_list(tabular_data, cat_columns, cont_columns) graph_metadata = { MetaData.NODES: [ { MetaData.NAME: "user", MetaData.COUNT: int(structural_data['user_id'].max()), MetaData.FEATURES: [], MetaData.FEATURES_PATH: None, }, { MetaData.NAME: "product", MetaData.COUNT: int(structural_data['product_id'].max()), MetaData.FEATURES: [], MetaData.FEATURES_PATH: None, } ], MetaData.EDGES: [ { MetaData.NAME: "user-product", MetaData.COUNT: len(structural_data), MetaData.SRC_NODE_TYPE: "user", MetaData.DST_NODE_TYPE: "product", MetaData.DIRECTED: False, MetaData.FEATURES: edge_features, MetaData.FEATURES_PATH: "user-product.parquet", MetaData.STRUCTURE_PATH: "user-product_edge_list.parquet", } ] } shutil.rmtree(self.destination_path, ignore_errors=True) os.makedirs(self.destination_path) tabular_data.to_parquet(os.path.join(self.destination_path, "user-product.parquet")) structural_data.to_parquet(os.path.join(self.destination_path, "user-product_edge_list.parquet")) with open(os.path.join(self.destination_path, 'graph_metadata.json'), 'w') as f: json.dump(graph_metadata, f, indent=4) graph_metadata[MetaData.PATH] = self.destination_path return SynGenDatasetFeatureSpec(graph_metadata) def download(self): raise 
NotImplementedError( "IEEE dataset does not support automatic downloading. Please run /workspace/scripts/get_datasets.sh" ) def _check_files(self) -> bool: files = ['data.csv'] return all(os.path.exists(os.path.join(self.source_path, file)) for file in files)
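A minimal usage sketch for the class above; the import path is inferred from the directory layout shown here and the data paths are illustrative. `data.csv` must already be present (for example, produced by `/workspace/scripts/get_datasets.sh`), since `download()` is not supported for this dataset.

```
from syngen.preprocessing.datasets.ieee import IEEEPreprocessing  # import path inferred from the repo layout

preprocessing = IEEEPreprocessing(
    source_path="/data/ieee-fraud",              # directory containing data.csv (illustrative path)
    destination_path="/data/ieee-fraud/syngen",  # where the parquet files and graph_metadata.json land
)
feature_spec = preprocessing.transform(gpu=False, use_cache=True)
```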
PyTorch/Classification/ConvNets/resnext101-32x4d/training/AMP
AMP
DGX1V_resnext101-32x4d_AMP_250E
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnext101-32x4d --precision AMP --mode convergence --platform DGX1V /imagenet --workspace ${1:-./} --raport-file raport.json
TensorFlow2/Recommendation/SIM
SIM
requirements
pynvml==11.0.0 git+https://github.com/NVIDIA/dllogger#egg=dllogger
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets
datasets
epinions
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import os import shutil import tarfile from typing import Optional from urllib.request import urlopen import cudf import cupy as cp import numpy as np import pandas as pd from syngen.configuration import SynGenDatasetFeatureSpec from syngen.preprocessing.base_preprocessing import BasePreprocessing from syngen.utils.types import MetaData logger = logging.getLogger(__name__) log = logger class EpinionsPreprocessing(BasePreprocessing): ITEM_SPACE_ARCHIVE_URL = ( "http://konect.cc/files/download.tsv.epinions-rating.tar.bz2" ) SOCIAL_SPACE_ARCHIVE_URL = ( "http://konect.cc/files/download.tsv.epinions.tar.bz2" ) def __init__( self, source_path: str, destination_path: Optional[str] = None, download: bool = False, **kwargs, ): """ preprocessing for http://www.trustlet.org/wiki/Extended_Epinions_dataset Args: """ self.ratings_file = os.path.join(source_path, 'epinions-rating', 'out.epinions-rating') self.trust_file = os.path.join(source_path, 'epinions', 'out.epinions') super().__init__(source_path, destination_path, download, **kwargs) def transform(self, gpu=False, use_cache=False): if use_cache and os.path.exists(self.destination_path): return SynGenDatasetFeatureSpec.instantiate_from_preprocessed(self.destination_path) operator = cp if gpu else np tabular_operator = cudf if gpu else pd item_space_data = tabular_operator.read_csv( self.ratings_file, sep=" ", names=["userId", "itemId", "rating", "timestamp"], skiprows=1, ) social_space_data = tabular_operator.read_csv( self.trust_file, sep=" ", names=["userId", "friendId", "trust", "timestamp"], skiprows=1, ) social_space_data = social_space_data[social_space_data["trust"] == 1] min_item_id = int(item_space_data['itemId'].min()) item_space_data['itemId'] = item_space_data['itemId'] - min_item_id min_user_id = min( int(item_space_data['userId'].min()), int(social_space_data['userId'].min()), int(social_space_data['friendId'].min()) ) item_space_data['userId'] = item_space_data['userId'] - min_user_id social_space_data['userId'] = social_space_data['userId'] - min_user_id social_space_data['friendId'] = social_space_data['friendId'] - min_user_id graph_metadata = { MetaData.NODES: [ { MetaData.NAME: "user", MetaData.COUNT: int(item_space_data['userId'].max()), MetaData.FEATURES: [], MetaData.FEATURES_PATH: None, }, { MetaData.NAME: "item", MetaData.COUNT: int(item_space_data['itemId'].max()), MetaData.FEATURES: [], MetaData.FEATURES_PATH: None, } ], MetaData.EDGES: [ { MetaData.NAME: "user-item", MetaData.COUNT: len(item_space_data), MetaData.SRC_NODE_TYPE: "user", MetaData.DST_NODE_TYPE: "item", MetaData.DIRECTED: False, MetaData.FEATURES: [ { MetaData.NAME: "rating", MetaData.DTYPE: str(item_space_data["rating"].dtype), MetaData.FEATURE_TYPE: MetaData.CATEGORICAL, } ], MetaData.FEATURES_PATH: "user-item.parquet", MetaData.STRUCTURE_PATH: "user-item_edge_list.parquet", }, { MetaData.NAME: "user-user", MetaData.COUNT: 
len(social_space_data), MetaData.SRC_NODE_TYPE: "user", MetaData.DST_NODE_TYPE: "user", MetaData.DIRECTED: False, MetaData.FEATURES: [], MetaData.FEATURES_PATH: None, MetaData.STRUCTURE_PATH: "user-user_edge_list.parquet", } ] } shutil.rmtree(self.destination_path, ignore_errors=True) os.makedirs(self.destination_path) item_space_data[['rating']] \ .to_parquet(os.path.join(self.destination_path, "user-item.parquet")) item_space_data[['userId', 'itemId']] \ .rename(columns={'userId': MetaData.SRC, 'itemId': MetaData.DST}) \ .to_parquet(os.path.join(self.destination_path, "user-item_edge_list.parquet")) social_space_data[['userId', 'friendId']] \ .rename(columns={'userId': MetaData.SRC, 'friendId': MetaData.DST}) \ .to_parquet(os.path.join(self.destination_path, "user-user_edge_list.parquet")) with open(os.path.join(self.destination_path, 'graph_metadata.json'), 'w') as f: json.dump(graph_metadata, f, indent=4) graph_metadata[MetaData.PATH] = self.destination_path return SynGenDatasetFeatureSpec(graph_metadata) def download(self): if not os.path.exists(self.source_path): os.makedirs(self.source_path) if not os.path.exists(self.ratings_file): with tarfile.open(fileobj=urlopen(self.ITEM_SPACE_ARCHIVE_URL), mode="r|bz2") as tar: tar.extractall(self.source_path) if not os.path.exists(self.trust_file): with tarfile.open(fileobj=urlopen(self.SOCIAL_SPACE_ARCHIVE_URL), mode="r|bz2") as tar: tar.extractall(self.source_path) def _check_files(self): files = [self.ratings_file, self.trust_file] return all(os.path.exists(file) for file in files)
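The ID shifting in `transform` above is easy to miss: user IDs appearing in both the rating and the trust edge lists are shifted by one shared offset so that both edge types index the same zero-based "user" node space. A toy pandas illustration with made-up values:

```
import pandas as pd

ratings = pd.DataFrame({"userId": [3, 5, 7], "itemId": [101, 102, 101]})
trust = pd.DataFrame({"userId": [5, 7], "friendId": [3, 5]})

# One shared offset for every column that refers to a user node.
min_user_id = min(ratings["userId"].min(), trust["userId"].min(), trust["friendId"].min())
min_item_id = ratings["itemId"].min()

ratings["userId"] -= min_user_id
trust[["userId", "friendId"]] -= min_user_id
ratings["itemId"] -= min_item_id

print(ratings["userId"].tolist(), trust["friendId"].tolist())  # [0, 2, 4] [0, 2]
```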
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment
deployment
deploy
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # author: Tomasz Grel (tgrel@nvidia.com) import argparse import os import tensorflow as tf import horovod.tensorflow as hvd import deployment.tf import deployment.hps def clear_and_create_directory(repo_path): print("creating directory:", repo_path) os.makedirs(repo_path, exist_ok=True) def create_model_repo(dst, sparse_model_name, dense_model_name, ensemble_name): clear_and_create_directory(dst) created = [] for name in sparse_model_name, dense_model_name, ensemble_name: d = os.path.join(dst, name) clear_and_create_directory(d) created.append(d) return created def set_tf_memory_growth(): physical_devices = tf.config.list_physical_devices("GPU") for d in physical_devices: tf.config.experimental.set_memory_growth(d, True) def main(): parser = argparse.ArgumentParser(description="") parser.add_argument( "--checkpoint-dir", type=str, help="Source directory with a checkpoint" ) parser.add_argument( "--model-repository-path", type=str, help="Destination directory with Triton model repository", ) parser.add_argument( "--model-name", type=str, help="The name of the model used for inference.", required=True, ) parser.add_argument( "--sparse-model-name", type=str, default='sparse' ) parser.add_argument( "--dense-model-name", type=str, default='dense' ) parser.add_argument( "--model-version", type=int, help="The version of the model used for inference.", required=False, default=1, ) parser.add_argument( "--dense-format", type=str, help="Target format of dense model part in ensemble.", choices=["tf-savedmodel", "onnx", "trt"], required=True, default="tf-savedmodel", ) parser.add_argument( "--sparse-format", type=str, help="Target format of dense model part in ensemble.", choices=["tf-savedmodel", "hps"], required=True, default="tf-savedmodel", ) parser.add_argument( "--model-precision", type=str, help="Target precision of dense model part in ensemble.", choices=["fp16", "fp32"], required=True, default="fp32", ) parser.add_argument( "--max-batch-size", type=int, help="The maximal batch size for deployed model.", required=False, default=32768, ) parser.add_argument( "--trt-optimal-batch-size", type=int, help="Batch size to optimize TensorRT performance for.", required=False, default=1024, ) parser.add_argument( "--memory-threshold-gb", type=int, help="Amount of memory in GB after reaching which CPU offloading will be used", required=False, default=70, ) parser.add_argument( "--engine-count-per-device", type=int, default=1, help="Number of model instances per GPU", ) parser.add_argument( "--num_gpus", type=int, default=1, help="Number of GPUs to deploy HPS onto", ) parser.add_argument( "--fused_embedding", action="store_true", default=False, help="Fuse the embedding table together for better GPU utilization.", ) parser.add_argument( "--hps_gpucacheper", type=float, default=0.25, help="Fraction of the embeddings to store in GPU cache.", ) parser.add_argument( "--server-url", type=str, 
default="grpc://127.0.0.1:8001", help="Url of Triton Inference Server", required=False, ) parser.add_argument( "--load-model", action="store_true", default=False, help="Call load model Triton endpoint after creating model store.", ) parser.add_argument( "--load-model-timeout-s", type=int, default=120, help="Timeout of load model operation.", required=False, ) parser.add_argument( "--verbose", action="store_true", default=False, help="Enable verbose logging", ) parser.add_argument( "--cpu", action="store_true", default=False, help="Run the entire model on CPU", ) parser.add_argument( "--monolithic", action="store_true", default=False, help="Don't use the ensemble paradigm. Instead, save everything into a single large SavedModel file", ) args = parser.parse_args() hvd.init() set_tf_memory_growth() deployment_package = deployment.hps if args.sparse_format == 'hps' else deployment.tf if args.monolithic: deployment_package.deploy_monolithic(sparse_src=os.path.join(args.checkpoint_dir, "sparse"), dense_src=os.path.join(args.checkpoint_dir, "dense"), dst=args.model_repository_path, model_name='dlrm', max_batch_size=65536, engine_count_per_device=1, num_gpus=1, version="1", cpu=args.cpu, model_precision='fp32') return sparse_dst, dense_dst, ensemble_dst = create_model_repo( dst=args.model_repository_path, ensemble_name=args.model_name, sparse_model_name=args.sparse_model_name, dense_model_name=args.dense_model_name ) num_numerical_features = deployment_package.deploy_dense( src=os.path.join(args.checkpoint_dir, "dense"), dst=dense_dst, model_name=args.dense_model_name, model_format=args.dense_format, model_precision=args.model_precision, max_batch_size=args.max_batch_size, trt_optimal_batch_size=args.trt_optimal_batch_size, engine_count_per_device=args.engine_count_per_device, ) num_cat_features = deployment_package.deploy_sparse( src=os.path.join(args.checkpoint_dir, "sparse"), dst=sparse_dst, model_name=args.sparse_model_name, num_gpus=args.num_gpus, fused=args.fused_embedding, max_batch_size=args.max_batch_size, gpucacheper=args.hps_gpucacheper, engine_count_per_device=args.engine_count_per_device, memory_threshold_gb=args.memory_threshold_gb ) deployment_package.deploy_ensemble( dst=ensemble_dst, model_name=args.model_name, sparse_model_name=args.sparse_model_name, dense_model_name=args.dense_model_name, num_cat_features=num_cat_features, num_numerical_features=num_numerical_features, version=args.model_version, max_batch_size=args.max_batch_size, ) if __name__ == "__main__": main()
TensorFlow2/Recommendation/DLRM_and_DCNv2/doc
doc
merlin_hps_inference
# Deploying Large Recommender models with Merlin HPS and Triton Inference Server This file contains instructions to run inference on Triton Inference Server as well as detailed performance analysis for DLRM and DCNv2 with Merlin HPS and TensorRT. It is intended to provide the best possible performance for inference with recommender models that don't fit into a single GPU memory. For models that can fit into a single GPU or, for some reason, cannot use Merlin HPS, we maintain a separate solution, described [here](tensorflow_inference.md). ## Solution overview ### Introduction The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server) provides a data center and cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or gRPC endpoint, allowing remote clients to request inferencing for any number of GPU or CPU models being managed by the server. This README provides step-by-step deployment instructions for models generated during training (as described in the [model README](../README.md)). Additionally, this README provides the corresponding deployment scripts that ensure optimal GPU utilization during inferencing on Triton Inference Server. ### Deployment with Merlin Hierarchical Parameter Server (HPS) [Merlin Hierarchical Parameter Server (HPS)](https://nvidia-merlin.github.io/HugeCTR/main/hierarchical_parameter_server/index.html) library is a native C++ library that provides caching and hierarchical storage for embeddings. The library is built from the GPU embedding cache and HPS database backend subcomponents. HPS offers flexible deployment and configuration to meet site-specific recommender system needs and is integrated by other projects that need the ability to work with embeddings that exceed the capacity of the GPU and host memory. Here, HPS is used to offload the least frequently used embedding vectors into CPU memory. This way, we can efficiently serve models that do not fit in the GPU. This approach is illustrated in Figure 1. <p align="center"> <img width="100%" src="./img/inference/cache_approach.svg" /> <br> Figure 1. GPU cache as a way to serve very large embedding tables. </p> In the example below, the model served as a Triton Ensemble, that is, it is composed of two submodels. The first submodel is the HPS part that handles the embedding lookup (sparse submodel). The second part is the dense submodel. It consists of the interaction layer and the MLPs with linear layers. Those are run with NVIDIA TensorRT Triton backend. The communication between the submodels is managed efficiently with CUDA memory copies by Triton. This solution allows us to get the benefits of Merlin HPS for the sparse part as well as the latest performance optimizations for the linear and interaction layers offered by NVIDIA TensorRT. The overall architecture of this approach is depicted in Figure 2. <p align="center"> <img width="100%" src="./img/inference/hps_tensorrt_architecture.svg" /> <br> Figure 2. Overall architecture of the Merlin HPS + TensorRT ensemble for running large recommender inference. </p> ### Deployment process The deployment process consists of two steps: 1. Conversion. The purpose of conversion is to transform the checkpoint saved during training into a ready-to-serve model. 2. Configuration. Model configuration on Triton Inference Server that generates necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md). 
After deployment, the Triton inference server is used for the evaluation of the converted model in two steps: 1. Correctness tests. Produce results that are tested against given correctness thresholds. 2. Performance tests. Produce latency and throughput results for offline (static batching) and online (dynamic batching) scenarios. Refer to [Quick Start Guide](#quick-start-guide) for further instructions on performing these tests. ## Setup Ensure you have the following components: * [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) * [NVIDIA TensorFlow NGC container 22.02](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tensorflow) * [NVIDIA Triton Inference Server NGC container 22.02](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver) * [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html) * [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU ## Quick Start Guide The instructions below assume you have already cloned the repository, built the training docker container, preprocessed the Criteo 1TB dataset, run the training and saved a model checkpoint. If you haven't completed those steps, refer to the [Quick Start Guide for DLRM](DLRM.md#quick-start-guide) or the [Quick Start Guide to DCNv2](DCNv2.md#quick-start-guide), depending on which model you'd like to deploy. 1. Build the Merlin HPS docker container: ``` cd deployment/hps docker build -t hps_triton . ``` 2. Run the training docker container built during the training stage: ``` # set input variables checkpoint_path=<path_to_checkpoint_saved_during_training> deploy_path=<destination_path_of_the_triton_model_repository> dataset_path=<path_to_the_dataset> mkdir -p $deploy_path docker run -v $checkpoint_path:$checkpoint_path -v $deploy_path:$deploy_path -v $dataset_path:$dataset_path -it --rm --network=host --ipc=host \ --shm-size=2g --ulimit memlock=-1 --ulimit stack=67108864 --gpus=all --cap-add SYS_NICE train_docker_image \ bash ``` 3. Convert the model checkpoint into a Triton model repository: ``` # set input variables inside the container checkpoint_path=<path_to_checkpoint_saved_during_training> deploy_path=<destination_path_of_the_triton_model_repository> dataset_path=<path_to_the_dataset> # run the deployment horovodrun -np 1 --mpi-args=--oversubscribe numactl --interleave=all \ python -m deployment.deploy --hps_gpucacheper 0.1 \ --checkpoint-dir $checkpoint_path --model-repository-path $deploy_path --num_gpus 1\ --fused_embedding --model-name dlrm --model-precision fp16 --dense-format trt\ --sparse-format hps ``` 4. In a separate terminal start the Triton Inference Server: ``` deploy_path=<destination_path_of_the_triton_model_repository> docker run -v $deploy_path:$deploy_path -it --rm --network=host --detach --ipc=host \ --shm-size=2g --ulimit memlock=-1 --ulimit stack=67108864 --gpus=all hps_triton \ bash -c "tritonserver --model-repository=${deploy_path} --backend-config=hps,ps=${deploy_path}/sparse/sparse.json\ --pinned-memory-pool-byte-size=4000000000 --cuda-memory-pool-byte-size=0:2000000000 2>&1" ``` 5. Switch back to the first terminal with the training container. Warm up the server with some initial requests: ``` python -u -m deployment.evaluate_accuracy --max_steps 1000 --dataset_path $dataset_path \ --fused_embedding --model_name dlrm --test_on_train --batch_size 16384 --sparse_input_format hps ``` 6. 
Measure inference execution speed ``` python -u -m deployment.evaluate_latency --sparse-format hps --model-name dlrm --dataset_path $dataset_path \ --fused-embedding --measurement-request-count 50 --measurement-interval 5000 \ --batch-sizes 4096 --num-benchmark-samples 262144 ``` 7. Measure the prediction quality of the deployed model ``` python -u -m deployment.evaluate_accuracy --dataset_path $dataset_path --fused_embedding \ --model_name dlrm --batch_size 16384 --sparse_input_format hps" ``` ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Offline scenario The offline scenario assumes the client and server are located on the same host. The tests uses: - tensors are passed through shared memory between client and server, the Perf Analyzer flag `shared-memory=system` is used - single request is send from client to server with static size of batch #### Offline: DLRM on NVIDIA DGX A100 (1x A100 80GB), Merlin HPS + TensorRT with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Model architecture | DLRM | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP32 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 3.64e+05 | 22 | 175 | 0 | 35 | 393 | 71 | 0 | 689 | 713 | 718 | 794 | 696 | | 1 | 1024 | 1 | 1.25e+06 | 23 | 169 | 0 | 46 | 506 | 69 | 0 | 787 | 849 | 1054 | 1128 | 813 | | 2 | 4096 | 1 | 2.33e+06 | 24 | 236 | 0 | 100 | 1276 | 114 | 0 | 1717 | 1748 | 1893 | 2408 | 1750 | | 3 | 16384 | 1 | 2.88e+06 | 86 | 648 | 0 | 337 | 4291 | 320 | 0 | 5527 | 5602 | 7016 | 8573 | 5682 | | 4 | 65536 | 1 | 3.36e+06 | 31 | 1210 | 0 | 1228 | 15777 | 1144 | 0 | 19093 | 19277 | 21757 | 24888 | 19390 | <img width="100%" src="./img/inference/merlin_hps_dlrm_dgx-a100-80gb_t3_fp32.svg" /> </details> #### Offline: DLRM on NVIDIA DGX A100 (1x A100 80GB), Merlin HPS + TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Model architecture | DLRM | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA Triton Ensemble (NVDIA 
Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 4.01e+05 | 23 | 156 | 0 | 33 | 351 | 69 | 0 | 624 | 661 | 669 | 738 | 632 | | 1 | 1024 | 1 | 1.25e+06 | 23 | 211 | 0 | 52 | 456 | 68 | 0 | 786 | 807 | 1057 | 1130 | 810 | | 2 | 4096 | 1 | 2.62e+06 | 26 | 294 | 0 | 99 | 1028 | 109 | 0 | 1533 | 1568 | 1579 | 1743 | 1556 | | 3 | 16384 | 1 | 4.00e+06 | 27 | 379 | 0 | 328 | 3042 | 309 | 0 | 4001 | 4098 | 4627 | 5833 | 4085 | | 4 | 65536 | 1 | 4.30e+06 | 34 | 1660 | 0 | 1227 | 11150 | 1102 | 0 | 14941 | 15309 | 16989 | 20144 | 15173 | <img width="100%" src="./img/inference/merlin_hps_dlrm_dgx-a100-80gb_t3_fp16.svg" /> </details> #### Offline: DLRM on NVIDIA A30, Merlin HPS + TensorRT with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA A30 | | Model architecture | DLRM | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP32 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 3.07e+05 | 22 | 194 | 0 | 42 | 512 | 57 | 0 | 801 | 832 | 1001 | 1103 | 827 | | 1 | 1024 | 1 | 9.60e+05 | 30 | 194 | 0 | 78 | 690 | 65 | 0 | 1040 | 1120 | 1154 | 1475 | 1057 | | 2 | 4096 | 1 | 1.38e+06 | 53 | 482 | 0 | 233 | 2014 | 181 | 0 | 2941 | 3009 | 3021 | 3059 | 2963 | | 3 | 16384 | 1 | 1.71e+06 | 47 | 467 | 0 | 894 | 7529 | 648 | 0 | 9534 | 9714 | 9783 | 11371 | 9585 | | 4 | 65536 | 1 | 1.79e+06 | 76 | 4207 | 1 | 2574 | 27369 | 2307 | 0 | 34512 | 39707 | 39914 | 61100 | 36534 | <img width="100%" src="./img/inference/merlin_hps_dlrm_a30-24gb_t3_fp32.svg" /> </details> #### Offline: DLRM on NVIDIA A30, Merlin HPS + TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA A30 | | Model architecture | DLRM | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + 
NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 3.30e+05 | 21 | 210 | 0 | 37 | 447 | 54 | 0 | 757 | 773 | 790 | 1035 | 769 | | 1 | 1024 | 1 | 1.06e+06 | 22 | 220 | 0 | 95 | 567 | 58 | 0 | 955 | 978 | 986 | 1138 | 962 | | 2 | 4096 | 1 | 1.49e+06 | 36 | 664 | 0 | 244 | 1623 | 172 | 0 | 2735 | 2770 | 2781 | 2820 | 2739 | | 3 | 16384 | 1 | 2.17e+06 | 66 | 607 | 0 | 903 | 5357 | 606 | 0 | 7558 | 7633 | 7641 | 7662 | 7539 | | 4 | 65536 | 1 | 2.40e+06 | 73 | 3640 | 1 | 2584 | 18617 | 2292 | 0 | 25568 | 31138 | 31241 | 39514 | 27207 | <img width="100%" src="./img/inference/merlin_hps_dlrm_a30-24gb_t3_fp16.svg" /> </details> #### Offline: DLRM on NVIDIA T4, Merlin HPS + TensorRT with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA T4 | | Model architecture | DLRM | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP32 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 1.33e+05 | 53 | 523 | 0 | 104 | 1156 | 75 | 0 | 1916 | 2120 | 2170 | 2295 | 1911 | | 1 | 1024 | 1 | 3.76e+05 | 50 | 405 | 0 | 131 | 1957 | 159 | 0 | 2697 | 2804 | 2836 | 2904 | 2702 | | 2 | 4096 | 1 | 4.41e+05 | 46 | 759 | 0 | 479 | 7384 | 610 | 0 | 9228 | 9511 | 9645 | 10538 | 9278 | | 3 | 16384 | 1 | 4.77e+05 | 48 | 1219 | 1 | 1865 | 29110 | 1942 | 0 | 33483 | 34759 | 35025 | 55576 | 34185 | | 4 | 65536 | 1 | 4.93e+05 | 54 | 4437 | 0 | 7400 | 113167 | 7262 | 0 | 131638 | 133320 | 133731 | 142058 | 132320 | <img width="100%" src="./img/inference/merlin_hps_dlrm_t4-16gb_t3_fp32.svg" /> </details> #### Offline: DLRM on NVIDIA T4, Merlin HPS + TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | 
|:-----------------------------|:-----------------------------| | GPU |NVIDIA T4 | | Model architecture | DLRM | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 2.36e+05 | 28 | 201 | 0 | 83 | 703 | 63 | 0 | 1039 | 1250 | 1355 | 1593 | 1078 | | 1 | 1024 | 1 | 4.92e+05 | 49 | 622 | 0 | 190 | 1112 | 95 | 0 | 2061 | 2220 | 2259 | 2324 | 2068 | | 2 | 4096 | 1 | 7.55e+05 | 43 | 657 | 0 | 483 | 3600 | 626 | 0 | 5402 | 5514 | 5533 | 5599 | 5409 | | 3 | 16384 | 1 | 8.73e+05 | 46 | 1120 | 0 | 1884 | 13703 | 1966 | 0 | 18175 | 19323 | 23458 | 29656 | 18719 | | 4 | 65536 | 1 | 9.34e+05 | 40 | 3691 | 0 | 7466 | 51644 | 7330 | 0 | 69254 | 71662 | 72009 | 86622 | 70171 | <img width="100%" src="./img/inference/merlin_hps_dlrm_t4-16gb_t3_fp16.svg" /> </details> #### Offline: DCNv2 on NVIDIA DGX A100 (1x A100 80GB), Merlin HPS + TensorRT with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Model architecture | DCNv2 | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP32 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 2.63e+05 | 23 | 159 | 0 | 36 | 681 | 69 | 0 | 955 | 983 | 1098 | 1137 | 968 | | 1 | 1024 | 1 | 8.19e+05 | 22 | 204 | 0 | 50 | 897 | 68 | 0 | 1234 | 1254 | 1261 | 1384 | 1241 | | 2 | 4096 | 1 | 1.25e+06 | 33 | 349 | 0 | 107 | 2681 | 105 | 0 | 3204 | 3316 | 4108 | 4271 | 3275 | | 3 | 16384 | 1 | 1.31e+06 | 32 | 468 | 0 | 326 | 11346 | 329 | 0 | 12338 | 12459 | 14463 | 14674 | 12501 | | 4 | 65536 | 1 | 1.33e+06 | 35 | 1180 | 0 | 1260 | 45518 | 1183 | 0 | 48985 | 49121 | 49142 | 54691 | 49176 | <img width="100%" src="./img/inference/merlin_hps_dcnv2_dgx-a100-80gb_t3_fp32.svg" /> 
</details> #### Offline: DCNv2 on NVIDIA DGX A100 (1x A100 80GB), Merlin HPS + TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Model architecture | DCNv2 | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 3.17e+05 | 23 | 172 | 0 | 36 | 501 | 69 | 0 | 797 | 809 | 815 | 942 | 801 | | 1 | 1024 | 1 | 1.03e+06 | 24 | 181 | 0 | 48 | 667 | 69 | 0 | 960 | 1018 | 1277 | 1337 | 989 | | 2 | 4096 | 1 | 1.85e+06 | 24 | 276 | 0 | 101 | 1708 | 101 | 0 | 2144 | 2184 | 2485 | 3562 | 2210 | | 3 | 16384 | 1 | 2.24e+06 | 82 | 429 | 0 | 327 | 6081 | 383 | 0 | 7056 | 7145 | 8028 | 9417 | 7302 | | 4 | 65536 | 1 | 2.45e+06 | 33 | 1262 | 0 | 1237 | 23144 | 1102 | 0 | 26602 | 26709 | 26800 | 33534 | 26778 | <img width="100%" src="./img/inference/merlin_hps_dcnv2_dgx-a100-80gb_t3_fp16.svg" /> </details> #### Offline: DCNv2 on NVIDIA A30, Merlin HPS + TensorRT with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA A30 | | Model architecture | DCNv2 | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP32 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 1.85e+05 | 22 | 207 | 0 | 42 | 1036 | 70 | 0 | 1355 | 1377 | 1388 | 1442 | 1377 | | 1 | 1024 | 1 | 5.64e+05 | 24 | 180 | 0 | 79 | 1458 | 66 | 0 | 1806 | 1824 | 1832 | 1866 | 1807 | | 2 | 4096 | 1 | 7.55e+05 | 57 | 323 | 0 | 245 | 4629 | 156 | 0 | 5399 | 5484 | 5519 | 5588 | 5410 | | 3 | 16384 | 1 | 8.16e+05 | 74 | 1249 | 1 | 899 | 17135 | 674 | 0 | 19579 | 20101 | 24995 | 27916 | 20032 | | 4 | 65536 | 1 | 
8.79e+05 | 78 | 1603 | 1 | 3586 | 66689 | 2346 | 0 | 73906 | 74311 | 74558 | 85554 | 74303 | <img width="100%" src="./img/inference/merlin_hps_dcnv2_a30-24gb_t3_fp32.svg" /> </details> #### Offline: DCNv2 on NVIDIA A30, Merlin HPS + TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA A30 | | Model architecture | DCNv2 | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 2.72e+05 | 30 | 177 | 0 | 37 | 636 | 55 | 0 | 918 | 961 | 1108 | 1144 | 935 | | 1 | 1024 | 1 | 7.02e+05 | 22 | 309 | 0 | 95 | 964 | 62 | 0 | 1436 | 1455 | 1462 | 1629 | 1452 | | 2 | 4096 | 1 | 1.14e+06 | 57 | 319 | 1 | 243 | 2788 | 166 | 0 | 3579 | 3630 | 3647 | 3672 | 3574 | | 3 | 16384 | 1 | 1.25e+06 | 72 | 1942 | 1 | 856 | 9644 | 626 | 0 | 13295 | 13556 | 13650 | 16335 | 13141 | | 4 | 65536 | 1 | 1.42e+06 | 80 | 2698 | 1 | 2700 | 38237 | 2331 | 0 | 44644 | 48730 | 49194 | 61910 | 46047 | <img width="100%" src="./img/inference/merlin_hps_dcnv2_a30-24gb_t3_fp16.svg" /> </details> #### Offline: DCNv2 on NVIDIA T4, Merlin HPS + TensorRT with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA T4 | | Model architecture | DCNv2 | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP32 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 7.73e+04 | 46 | 523 | 0 | 101 | 2419 | 207 | 0 | 3272 | 3466 | 3528 | 3906 | 3296 | | 1 | 1024 | 1 | 1.04e+05 | 51 | 556 | 0 | 195 | 8733 | 243 | 0 | 9477 | 10197 | 11500 | 15047 | 9778 | | 2 | 4096 | 1 | 1.11e+05 | 63 | 936 | 0 | 473 | 34713 | 594 | 0 | 35969 | 38166 | 40363 | 
55363 | 36779 | | 3 | 16384 | 1 | 1.13e+05 | 159 | 1216 | 0 | 1834 | 138852 | 1952 | 0 | 143232 | 145827 | 147995 | 150841 | 144013 | | 4 | 65536 | 1 | 1.12e+05 | 60 | 4961 | 0 | 7310 | 561876 | 7248 | 0 | 581850 | 585347 | 586993 | 593200 | 581455 | <img width="100%" src="./img/inference/merlin_hps_dcnv2_t4-16gb_t3_fp32.svg" /> </details> #### Offline: DCNv2 on NVIDIA T4, Merlin HPS + TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA T4 | | Model architecture | DCNv2 | | Model size | 22B parameters | | Backend |NVDIA Merlin HPS + NVIDIA TensorRT| | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA Triton Ensemble (NVDIA Merlin HPS + NVIDIA TensorRT)| | Max batch size |65536| | Number of model instances |1| | Export Format | TensorFlow SavedModel| | NVIDIA TensorRT Capture CUDA Graph | Enabled| | Device Kind | gpu| <details><summary>Results Table</summary> | | Batch | Concurrency | Inferences/Second | Client Send | Network+Server Send/Recv | Server Queue | Server Compute Input | Server Compute Infer | Server Compute Output | Client Recv | p50 latency | p90 latency | p95 latency | p99 latency | avg latency | |---:|--------:|--------------:|--------------------:|--------------:|---------------------------:|---------------:|-----------------------:|-----------------------:|------------------------:|--------------:|--------------:|--------------:|--------------:|--------------:|--------------:| | 0 | 256 | 1 | 1.42e+05 | 52 | 362 | 0 | 74 | 1222 | 73 | 0 | 1778 | 1915 | 1961 | 2032 | 1783 | | 1 | 1024 | 1 | 3.27e+05 | 46 | 558 | 0 | 147 | 2097 | 264 | 0 | 3084 | 3241 | 3266 | 3584 | 3112 | | 2 | 4096 | 1 | 4.09e+05 | 47 | 728 | 0 | 474 | 8106 | 638 | 0 | 9993 | 10239 | 10318 | 10551 | 9993 | | 3 | 16384 | 1 | 4.30e+05 | 68 | 1695 | 0 | 1836 | 32338 | 1990 | 0 | 37402 | 39030 | 40043 | 50287 | 37927 | | 4 | 65536 | 1 | 4.23e+05 | 54 | 4446 | 0 | 7287 | 135833 | 7202 | 0 | 154310 | 157113 | 157725 | 161462 | 154822 | <img width="100%" src="./img/inference/merlin_hps_dcnv2_t4-16gb_t3_fp16.svg" /> </details> ## Advanced ### Latency explanation A typical Triton Inference Server pipeline can be broken down into the following steps: 1. The client serializes the inference request into a message and sends it to the server (Client Send). 2. The message travels over the network from the client to the server (Network). 3. The message arrives at the server and is deserialized (Server Receive). 4. The request is placed on the queue (Server Queue). 5. The request is removed from the queue and computed (Server Compute). 6. The completed request is serialized in a message and sent back to the client (Server Send). 7. The completed message then travels over the network from the server to the client (Network). 8. The completed message is deserialized by the client and processed as a completed inference request (Client Receive). Generally, for local clients, steps 1-4 and 6-8 will only occupy a small fraction of time compared to step 5. In distributed systems and online processing where the client and server side are connected through a network, the send and receive steps might have an impact on overall processing performance. In order to analyze the possible bottlenecks, detailed charts are presented in online scenario cases. 
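As a rough way to observe steps 1-8 from the client side, the hedged sketch below sends a single gRPC request to the deployed ensemble and times the round trip. The input tensor names, shapes, and dtypes are placeholders; check the generated Triton `config.pbtxt` for the real ones.

```
import time
import numpy as np
import tritonclient.grpc as grpcclient

client = grpcclient.InferenceServerClient(url="localhost:8001")

batch_size = 4096
# Placeholder tensor names/shapes -- replace with the values from config.pbtxt.
categorical = grpcclient.InferInput("categorical_features", [batch_size, 26], "INT32")
numerical = grpcclient.InferInput("numerical_features", [batch_size, 13], "FP32")
categorical.set_data_from_numpy(np.zeros((batch_size, 26), dtype=np.int32))
numerical.set_data_from_numpy(np.zeros((batch_size, 13), dtype=np.float32))

start = time.perf_counter()
result = client.infer(model_name="dlrm", inputs=[categorical, numerical])
print("end-to-end client latency: %.2f ms" % ((time.perf_counter() - start) * 1e3))
```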
## Release Notes We’re constantly refining and improving our performance on AI and HPC workloads, even on the same hardware, with frequent updates to our software stack. For our latest performance data, refer to these pages for [AI](https://developer.nvidia.com/deep-learning-performance-training-inference) and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks. ### Changelog April 2023 - Initial release ### Known issues - There are no known issues with this model.
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils
utils
cugraph
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def import_cugraph(): """ Lazy import of cugraph. """ import cugraph return cugraph
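A short usage sketch of the lazy import above: cuGraph is only imported when a GPU code path actually needs it, so CPU-only runs never pay the import (or missing-dependency) cost. The edge-list column names are illustrative.

```
from syngen.utils.cugraph import import_cugraph  # import path inferred from the repo layout

def count_nodes(edge_df):
    # edge_df is a cudf.DataFrame with "src"/"dst" columns (illustrative names).
    cugraph = import_cugraph()
    graph = cugraph.Graph()
    graph.from_cudf_edgelist(edge_df, source="src", destination="dst")
    return graph.number_of_nodes()
```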
PyTorch/Detection/Efficientdet/effdet/layers
layers
drop
""" DropBlock, DropPath PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. Papers: DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890) Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382) Code: DropBlock impl inspired by two Tensorflow impl that I liked: - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74 - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py Hacked together by / Copyright 2020 Ross Wightman """ # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn import torch.nn.functional as F def drop_block_2d( x, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False): """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf DropBlock with an experimental gaussian noise option. This layer has been tested on a few training runs with success, but needs further validation and possibly optimization for lower runtime impact. """ B, C, H, W = x.shape total_size = W * H clipped_block_size = min(block_size, min(W, H)) # seed_drop_rate, the gamma parameter gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( (W - block_size + 1) * (H - block_size + 1)) # Forces the block to be inside the feature map. w_i, h_i = torch.meshgrid(torch.arange(W).to(x.device), torch.arange(H).to(x.device)) valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \ ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)) valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype) if batchwise: # one mask for whole batch, quite a bit faster uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) else: uniform_noise = torch.rand_like(x) block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype) block_mask = -F.max_pool2d( -block_mask, kernel_size=clipped_block_size, # block_size, stride=1, padding=clipped_block_size // 2) if with_noise: normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) if inplace: x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) else: x = x * block_mask + normal_noise * (1 - block_mask) else: normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype) if inplace: x.mul_(block_mask * normalize_scale) else: x = x * block_mask * normalize_scale return x def drop_block_fast_2d( x: torch.Tensor, drop_prob: float = 0.1, block_size: int = 7, gamma_scale: float = 1.0, with_noise: bool = False, inplace: bool = False, batchwise: bool = False): """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid block mask at edges. 
""" B, C, H, W = x.shape total_size = W * H clipped_block_size = min(block_size, min(W, H)) gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( (W - block_size + 1) * (H - block_size + 1)) if batchwise: # one mask for whole batch, quite a bit faster block_mask = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) < gamma else: # mask per batch element block_mask = torch.rand_like(x) < gamma block_mask = F.max_pool2d( block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) if with_noise: normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) if inplace: x.mul_(1. - block_mask).add_(normal_noise * block_mask) else: x = x * (1. - block_mask) + normal_noise * block_mask else: block_mask = 1 - block_mask normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(dtype=x.dtype) if inplace: x.mul_(block_mask * normalize_scale) else: x = x * block_mask * normalize_scale return x class DropBlock2d(nn.Module): """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf """ def __init__(self, drop_prob=0.1, block_size=7, gamma_scale=1.0, with_noise=False, inplace=False, batchwise=False, fast=True): super(DropBlock2d, self).__init__() self.drop_prob = drop_prob self.gamma_scale = gamma_scale self.block_size = block_size self.with_noise = with_noise self.inplace = inplace self.batchwise = batchwise self.fast = fast # FIXME finish comparisons of fast vs not def forward(self, x): if not self.training or not self.drop_prob: return x if self.fast: return drop_block_fast_2d( x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) else: return drop_block_2d( x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) def drop_path(x, drop_prob: float = 0., training: bool = False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0. or not training: return x keep_prob = 1 - drop_prob random_tensor = keep_prob + torch.rand((x.size()[0], 1, 1, 1), dtype=x.dtype, device=x.device) random_tensor.floor_() # binarize output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training)
TensorFlow/Segmentation/MaskRCNN
MaskRCNN
README
Both the TensorFlow 1.x and TensorFlow 2.x versions of Mask-RCNN are located in the [TensorFlow2/Segmentation/MaskRCNN folder](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Segmentation/MaskRCNN). The Mask-RCNN model for TensorFlow 1 is no longer maintained.
PyTorch/Forecasting/TFT/triton
triton
metrics
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pickle

import numpy as np
import pandas as pd
import torch

from criterions import QuantileLoss
from triton.deployment_toolkit.core import BaseMetricsCalculator


def update_argparser(parser):
    parser.add_argument("--dataset", type=str, help="Path to dataset to be used", required=True)
    parser.add_argument("--checkpoint", type=str, help="Path to checkpoint to be used", required=True)


def _unscale_per_id(config, values, ids, scalers):
    # values = values.cpu().numpy()
    num_horizons = config.example_length - config.encoder_length + 1
    flat_values = pd.DataFrame(
        values,
        columns=[f't{j}' for j in range(num_horizons - values.shape[1], num_horizons)]
    )
    flat_values['id'] = ids
    df_list = []
    for idx, group in flat_values.groupby('id'):
        scaler = scalers[idx]
        group_copy = group.copy()
        for col in group_copy.columns:
            if not 'id' in col:
                _col = np.expand_dims(group_copy[col].values, -1)
                _t_col = scaler.inverse_transform(_col)[:, -1]
                group_copy[col] = _t_col
        df_list.append(group_copy)
    flat_values = pd.concat(df_list, axis=0)
    flat_values = flat_values[[col for col in flat_values if not 'id' in col]]
    flat_tensor = torch.from_numpy(flat_values.values)
    return flat_tensor


def _unscale(config, values, scaler):
    # values = values.cpu().numpy()
    num_horizons = config.example_length - config.encoder_length + 1
    flat_values = pd.DataFrame(
        values,
        columns=[f't{j}' for j in range(num_horizons - values.shape[1], num_horizons)]
    )
    for col in flat_values.columns:
        if not 'id' in col:
            _col = np.expand_dims(flat_values[col].values, -1)
            _t_col = scaler.inverse_transform(_col)[:, -1]
            flat_values[col] = _t_col
    flat_values = flat_values[[col for col in flat_values if not 'id' in col]]
    flat_tensor = torch.from_numpy(flat_values.values)
    return flat_tensor


class MetricsCalculator(BaseMetricsCalculator):
    def __init__(self, dataset, checkpoint):
        state_dict = torch.load(os.path.join(checkpoint, "checkpoint.pt"))
        self.config = state_dict['config']
        self.predictions = []
        self.targets = []
        self.ids = []
        self.scalers = pickle.load(open(os.path.join(dataset, 'tgt_scalers.bin'), 'rb'))

    @property
    def metrics(self):
        targets = np.concatenate(self.targets, axis=0)
        # targets = torch.cat(self.targets, dim=0)
        predictions = np.concatenate(self.predictions, axis=0)
        # predictions = torch.cat(self.predictions, dim=0)
        ids = np.concatenate(self.ids, axis=0)

        if self.config.scale_per_id:
            unscaled_predictions = torch.stack(
                [_unscale_per_id(self.config, predictions[:, :, i], ids, self.scalers)
                 for i in range(len(self.config.quantiles))],
                dim=-1)
            unscaled_targets = _unscale_per_id(self.config, targets[:, :, 0], ids, self.scalers).unsqueeze(-1)
        else:
            ids = None
            unscaled_predictions = torch.stack(
                [_unscale(self.config, predictions[:, :, i], self.scalers[''])
                 for i in range(len(self.config.quantiles))],
                dim=-1)
            unscaled_targets = _unscale(self.config, targets[:, :, 0], self.scalers['']).unsqueeze(-1)

        losses = QuantileLoss(self.config)(unscaled_predictions, unscaled_targets)
        normalizer = unscaled_targets.abs().mean()
        q_risk = 2 * losses / normalizer
        return {
            'test_p10': q_risk[0].cpu().numpy(),
            'test_p50': q_risk[1].cpu().numpy(),
            'test_p90': q_risk[2].cpu().numpy(),
        }

    def update(
        self,
        ids,
        y_pred,
        x,
        y_real,
    ):
        # can probably just pass all of this to the evaluator main class
        self.predictions.append(y_pred["target__0"])
        self.targets.append(y_real['target__0'][:, :, 0][:, :, np.newaxis])
        self.ids.append(ids)
        # return self.metrics
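For context, the q-risk values returned by the metrics property above are doubled quantile (pinball) losses normalised by the magnitude of the targets. Below is a minimal, self-contained sketch of that computation on toy NumPy arrays; the inputs are hypothetical and the loss is re-implemented directly rather than through the repository's QuantileLoss criterion.

import numpy as np


def pinball_loss(y_true, y_pred, q):
    # Quantile (pinball) loss for a single quantile q, element-wise.
    diff = y_true - y_pred
    return np.maximum(q * diff, (q - 1) * diff)


# Hypothetical unscaled targets and per-quantile predictions, shape (batch, horizon).
y_true = np.array([[10.0, 12.0, 11.0]])
predictions = {
    0.1: np.array([[8.0, 9.0, 9.5]]),
    0.5: np.array([[10.5, 11.5, 11.0]]),
    0.9: np.array([[13.0, 14.0, 13.0]]),
}

# q-risk: doubled quantile loss normalised by the mean absolute target.
normalizer = np.abs(y_true).mean()
q_risk = {q: 2 * pinball_loss(y_true, p, q).mean() / normalizer for q, p in predictions.items()}
print(q_risk)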
TensorFlow2/Recommendation/WideAndDeep/data/outbrain
outbrain
embedding_sizes
{"document_id": 128, "ad_id": 128, "document_id_promo": 128, "source_id_promo": 64, "source_id": 64, "geo_location": 64, "advertiser_id": 64, "geo_location_state": 64, "publisher_id_promo": 64, "publisher_id": 64, "geo_location_country": 64, "platform": 19, "campaign_id": 128, "topic_id_list": 64, "entity_id_list": 64, "category_id_list": 64}
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/denoiser
denoiser
denoiserInstance
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef TT2I_DENOISERINSTANCE_H
#define TT2I_DENOISERINSTANCE_H

#include "cudaMemory.h"
#include "denoiserStreamingInstance.h"
#include "timedObject.h"

namespace nvinfer1
{
class ICudaEngine;
} // namespace nvinfer1

namespace tts
{

class DenoiserInstance : public TimedObject
{
public:
    /**
     * @brief Create a new denoiser.
     *
     * @param engine The TensorRT engine to perform denoising with.
     */
    DenoiserInstance(TRTPtr<nvinfer1::ICudaEngine>&& engine);

    /**
     * @brief Perform inference using the denoiser.
     *
     * @param batchSize The number of items in the batch.
     * @param inputDevice The input tensor on the device.
     * @param inputSpacing The spacing between the start of items in the batch.
     * @param inputLength The length of each input.
     * @param outputDevice The output tensor on the device.
     */
    void infer(
        const int batchSize,
        const float* inputDevice,
        int inputSpacing,
        const int* inputLength,
        float* outputDevice);

private:
    DenoiserStreamingInstance mStreamingInstance;

    CudaMemory<float> mInBufferDevice;
    CudaMemory<float> mOutBufferDevice;
};

} // namespace tts

#endif
PyTorch/LanguageModeling/BART/bart/modeling
modeling
modeling_t5
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 Mesh TensorFlow authors, T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch T5 model. """ import copy import math import os import warnings import torch import torch.nn.functional as F from torch import nn from torch.nn import CrossEntropyLoss from bart.configuration.configuration_t5 import T5Config from utils.file_utils import ( DUMMY_INPUTS, DUMMY_MASK, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from bart.modeling.modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, Seq2SeqLMOutput, Seq2SeqModelOutput from bart.modeling.modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "T5Config" _TOKENIZER_FOR_DOC = "T5Tokenizer" #################################################### # This dict contains ids and associated url # for the pretrained weights provided with the models #################################################### T5_PRETRAINED_MODEL_ARCHIVE_LIST = [ "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", # See all T5 models at https://huggingface.co/models?filter=t5 ] #################################################### # This is a conversion method from TF 1.0 to PyTorch # More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28 #################################################### def load_tf_weights_in_t5(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] tf_weights = {} for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) tf_weights[name] = array for txt_name in names: name = txt_name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info("Skipping {}".format("/".join(name))) tf_weights.pop(txt_name, None) continue if "_slot_" in name[-1]: logger.info("Skipping {}".format("/".join(name))) tf_weights.pop(txt_name, None) continue pointer = model array = tf_weights[txt_name] for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] in ["kernel", "scale", "embedding"]: pointer = getattr(pointer, "weight") elif scope_names[0] == "self_attention": pointer = getattr(pointer, "layer") pointer = pointer[0] elif scope_names[0] == "enc_dec_attention": pointer = getattr(pointer, "layer") pointer = pointer[1] elif scope_names[0] == "dense_relu_dense": pointer = getattr(pointer, "layer") pointer = pointer[2] elif scope_names[0] == "rms_norm": if hasattr(pointer, "layer_norm"): pointer = getattr(pointer, "layer_norm") elif hasattr(pointer, "final_layer_norm"): pointer = getattr(pointer, "final_layer_norm") elif scope_names[0] == "scale": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") elif scope_names[0] == "decoder" and name[1] == "logits": continue elif scope_names[0] == "logits": pointer = getattr(pointer, "lm_head") elif scope_names[0] == "wi" and len(scope_names) > 1 and scope_names[1].isdigit(): pointer = getattr(pointer, f"wi_{scope_names[1]}") continue else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if scope_names[0] not in ["kernel", "scale", "embedding"]: pointer = getattr(pointer, "weight") if scope_names[0] != "embedding": logger.info("Transposing numpy weight of shape {} for {}".format(array.shape, name)) array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array.astype(np.float32)) tf_weights.pop(txt_name, None) logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys()))) return model #################################################### # PyTorch Models are constructed by sub-classing # - torch.nn.Module for the layers and # - PreTrainedModel for the models (it-self a sub-class of torch.nn.Module) #################################################### PARALLELIZE_DOCSTRING = r""" This is an experimental feature and is a subject to change at a moment's notice. 
Uses a device map to distribute attention modules of the model across several devices. If no device map is given, it will evenly distribute blocks across all devices. Args: device_map (:obj:`Dict[int, list]`, optional, defaults to None): A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always automatically mapped to the first device (for esoteric reasons). That means that the first device should have fewer attention modules mapped to it than other devices. For reference, the t5 models have the following number of attention modules: - t5-small: 6 - t5-base: 12 - t5-large: 24 - t5-3b: 24 - t5-11b: 24 Example:: # Here is an example of a device map on a machine with 4 GPUs using t5-3b, which has a total of 24 attention modules: model = T5ForConditionalGeneration.from_pretrained('t5-3b') device_map = {0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23]} model.parallelize(device_map) """ DEPARALLELIZE_DOCSTRING = r""" Moves the model to cpu from a model parallel state. Example:: # On a 4 GPU machine with t5-3b: model = T5ForConditionalGeneration.from_pretrained('t5-3b') device_map = {0: [0, 1, 2], 1: [3, 4, 5, 6, 7, 8, 9], 2: [10, 11, 12, 13, 14, 15, 16], 3: [17, 18, 19, 20, 21, 22, 23]} model.parallelize(device_map) # Splits the model across several devices model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() """ class T5LayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ Construct a layernorm module in the T5 style No bias and no subtraction of mean. """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): # layer norm should always be calculated in float32 variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into float16 if necessary if self.weight.dtype == torch.float16: hidden_states = hidden_states.to(torch.float16) return self.weight * hidden_states class T5DenseReluDense(nn.Module): def __init__(self, config): super().__init__() self.wi = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): hidden_states = self.wi(hidden_states) hidden_states = F.relu(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states class T5DenseGatedGeluDense(nn.Module): def __init__(self, config): super().__init__() self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False) self.wo = nn.Linear(config.d_ff, config.d_model, bias=False) self.dropout = nn.Dropout(config.dropout_rate) self.gelu_act = ACT2FN["gelu_new"] def forward(self, hidden_states): hidden_gelu = self.gelu_act(self.wi_0(hidden_states)) hidden_linear = self.wi_1(hidden_states) hidden_states = hidden_gelu * hidden_linear hidden_states = self.dropout(hidden_states) hidden_states = self.wo(hidden_states) return hidden_states class T5LayerFF(nn.Module): def __init__(self, config): super().__init__() if config.feed_forward_proj == "relu": self.DenseReluDense = T5DenseReluDense(config) elif config.feed_forward_proj == "gated-gelu": self.DenseReluDense = T5DenseGatedGeluDense(config) else: raise ValueError( 
f"{self.config.feed_forward_proj} is not supported. Choose between `relu` and `gated-gelu`" ) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward(self, hidden_states): forwarded_states = self.layer_norm(hidden_states) forwarded_states = self.DenseReluDense(forwarded_states) hidden_states = hidden_states + self.dropout(forwarded_states) return hidden_states class T5Attention(nn.Module): def __init__(self, config: T5Config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.has_relative_attention_bias = has_relative_attention_bias self.relative_attention_num_buckets = config.relative_attention_num_buckets self.d_model = config.d_model self.key_value_proj_dim = config.d_kv self.n_heads = config.num_heads self.dropout = config.dropout_rate self.inner_dim = self.n_heads * self.key_value_proj_dim # Mesh TensorFlow initialization to avoid scaling before softmax self.q = nn.Linear(self.d_model, self.inner_dim, bias=False) self.k = nn.Linear(self.d_model, self.inner_dim, bias=False) self.v = nn.Linear(self.d_model, self.inner_dim, bias=False) self.o = nn.Linear(self.inner_dim, self.d_model, bias=False) if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.n_heads, self.key_value_proj_dim, self.pruned_heads ) # Prune linear layers self.q = prune_linear_layer(self.q, index) self.k = prune_linear_layer(self.k, index) self.v = prune_linear_layer(self.v, index) self.o = prune_linear_layer(self.o, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.inner_dim = self.key_value_proj_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): """ Adapted from Mesh Tensorflow: https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593 Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. 
This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).to(torch.long) * num_buckets relative_position = torch.abs(relative_position) else: relative_position = -torch.min(relative_position, torch.zeros_like(relative_position)) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance relative_postion_if_large = max_exact + ( torch.log(relative_position.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_postion_if_large = torch.min( relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_position, relative_postion_if_large) return relative_buckets def compute_bias(self, query_length, key_length): """ Compute binned relative position bias """ context_position = torch.arange(query_length, dtype=torch.long)[:, None] memory_position = torch.arange(key_length, dtype=torch.long)[None, :] relative_position = memory_position - context_position # shape (query_length, key_length) relative_position_bucket = self._relative_position_bucket( relative_position, # shape (query_length, key_length) bidirectional=(not self.is_decoder), num_buckets=self.relative_attention_num_buckets, ) relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device) values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads) values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length) return values def forward( self, hidden_states, mask=None, key_value_states=None, position_bias=None, past_key_value=None, layer_head_mask=None, query_length=None, use_cache=False, output_attentions=False, ): """ Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states). """ # Input is (batch_size, seq_length, dim) # Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length) # past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head) batch_size, seq_length = hidden_states.shape[:2] real_seq_length = seq_length if past_key_value is not None: assert ( len(past_key_value) == 2 ), "past_key_value should have 2 past states: keys and values. 
Got {} past states".format( len(past_key_value) ) real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length key_length = real_seq_length if key_value_states is None else key_value_states.shape[1] def shape(states): """ projection """ return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2) def unshape(states): """ reshape """ return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim) def project(hidden_states, proj_layer, key_value_states, past_key_value): """ projects hidden states correctly to key/query states """ if key_value_states is None: # self-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(hidden_states)) elif past_key_value is None: # cross-attn # (batch_size, n_heads, seq_length, dim_per_head) hidden_states = shape(proj_layer(key_value_states)) if past_key_value is not None: if key_value_states is None: # self-attn # (batch_size, n_heads, key_length, dim_per_head) hidden_states = torch.cat([past_key_value, hidden_states], dim=2) else: # cross-attn hidden_states = past_key_value return hidden_states # get query states query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head) # get key/value states key_states = project( hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None ) value_states = project( hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None ) # compute scores scores = torch.matmul( query_states, key_states.transpose(3, 2) ) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9 if position_bias is None: if not self.has_relative_attention_bias: position_bias = torch.zeros( (1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype ) else: position_bias = self.compute_bias(real_seq_length, key_length) # if key and values are already calculated # we want only the last query position bias if past_key_value is not None: position_bias = position_bias[:, :, -seq_length:, :] if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) scores += position_bias attn_weights = F.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) attn_weights = F.dropout( attn_weights, p=self.dropout, training=self.training ) # (batch_size, n_heads, seq_length, key_length) # Mask heads if we want to if layer_head_mask is not None: attn_weights = attn_weights * layer_head_mask attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim) attn_output = self.o(attn_output) present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None outputs = (attn_output,) + (present_key_value_state,) + (position_bias,) if output_attentions: outputs = outputs + (attn_weights,) return outputs class T5LayerSelfAttention(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, ): normed_hidden_states = 
self.layer_norm(hidden_states) attention_output = self.SelfAttention( normed_hidden_states, mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = hidden_states + self.dropout(attention_output[0]) outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them return outputs class T5LayerCrossAttention(nn.Module): def __init__(self, config): super().__init__() self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False) self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) def forward( self, hidden_states, key_value_states, attention_mask=None, position_bias=None, layer_head_mask=None, past_key_value=None, use_cache=False, query_length=None, output_attentions=False, ): normed_hidden_states = self.layer_norm(hidden_states) attention_output = self.EncDecAttention( normed_hidden_states, mask=attention_mask, key_value_states=key_value_states, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, query_length=query_length, output_attentions=output_attentions, ) layer_output = hidden_states + self.dropout(attention_output[0]) outputs = (layer_output,) + attention_output[1:] # add attentions if we output them return outputs class T5Block(nn.Module): def __init__(self, config, has_relative_attention_bias=False): super().__init__() self.is_decoder = config.is_decoder self.layer = nn.ModuleList() self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias)) if self.is_decoder: self.layer.append(T5LayerCrossAttention(config)) self.layer.append(T5LayerFF(config)) def forward( self, hidden_states, attention_mask=None, position_bias=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, layer_head_mask=None, encoder_layer_head_mask=None, past_key_value=None, use_cache=False, output_attentions=False, return_dict=True, ): if past_key_value is not None: assert self.is_decoder, "Only decoder can use `past_key_values`" expected_num_past_key_values = 2 if encoder_hidden_states is None else 4 error_message = "There should be {} past states. 
2 (past / key) for self attention.{} Got {} past key / value states".format( expected_num_past_key_values, "2 (past / key) for cross attention" if expected_num_past_key_values == 4 else "", len(past_key_value), ) assert len(past_key_value) == expected_num_past_key_values, error_message self_attn_past_key_value = past_key_value[:2] cross_attn_past_key_value = past_key_value[2:] else: self_attn_past_key_value, cross_attn_past_key_value = None, None self_attention_outputs = self.layer[0]( hidden_states, attention_mask=attention_mask, position_bias=position_bias, layer_head_mask=layer_head_mask, past_key_value=self_attn_past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights # clamp inf values to enable fp16 training if torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention # if using past key value states. Need to inject it here if present_key_value_state is not None: query_length = present_key_value_state[0].shape[2] else: query_length = None cross_attention_outputs = self.layer[1]( hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, position_bias=encoder_decoder_position_bias, layer_head_mask=encoder_layer_head_mask, past_key_value=cross_attn_past_key_value, query_length=query_length, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = cross_attention_outputs[0] if torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] # Keep cross-attention outputs and relative position weights attention_outputs = attention_outputs + cross_attention_outputs[2:] # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) if torch.isinf(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) outputs = outputs + (present_key_value_state,) + attention_outputs return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias) class T5PreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = T5Config load_tf_weights = load_tf_weights_in_t5 base_model_prefix = "transformer" is_parallelizable = True @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "decoder_input_ids": input_ids, "input_ids": input_ids, "decoder_attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """ Initialize the weights """ factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, T5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance(module, (T5Model, T5ForConditionalGeneration, T5EncoderModel)): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) elif isinstance(module, T5DenseReluDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5DenseGatedGeluDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5Attention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model ** -0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5)) def _shift_right(self, input_ids): decoder_start_token_id = self.config.decoder_start_token_id pad_token_id = self.config.pad_token_id assert ( decoder_start_token_id is not None ), "self.model.config.decoder_start_token_id has to be defined. In T5 it is usually set to the pad_token_id. See T5 docs for more information" # shift inputs to the right shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." 
# replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values" return shifted_input_ids class T5Stack(T5PreTrainedModel): def __init__(self, config, embed_tokens=None): super().__init__(config) self.embed_tokens = embed_tokens self.is_decoder = config.is_decoder self.block = nn.ModuleList( [T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)] ) self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon) self.dropout = nn.Dropout(config.dropout_rate) self.init_weights() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): # Check validity of device_map self.device_map = ( get_device_map(len(self.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.block)) self.model_parallel = True self.first_device = "cpu" if "cpu" in self.device_map.keys() else "cuda:" + str(min(self.device_map.keys())) self.last_device = "cuda:" + str(max(self.device_map.keys())) # Load onto devices for k, v in self.device_map.items(): for layer in v: cuda_device = "cuda:" + str(k) self.block[layer] = self.block[layer].to(cuda_device) # Set embed_tokens to first layer self.embed_tokens = self.embed_tokens.to(self.first_device) # Set final layer norm to last device self.final_layer_norm = self.final_layer_norm.to(self.last_device) @add_start_docstrings(PARALLELIZE_DOCSTRING) def deparallelize(self): self.model_parallel = False self.device_map = None self.first_device = "cpu" self.last_device = "cpu" for i in range(len(self.block)): self.block[i] = self.block[i].to("cpu") self.embed_tokens = self.embed_tokens.to("cpu") self.final_layer_norm = self.final_layer_norm.to("cpu") torch.cuda.empty_cache() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, new_embeddings): self.embed_tokens = new_embeddings def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, inputs_embeds=None, head_mask=None, encoder_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # Model parallel if self.model_parallel: torch.cuda.set_device(self.first_device) self.embed_tokens = self.embed_tokens.to(self.first_device) use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if self.is_decoder else "" raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds") if inputs_embeds is 
None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape # required mask seq length can be calculated via length of past mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length if use_cache is True: assert self.is_decoder, ":obj:`use_cache` can only be set to `True` if {} is used as a decoder".format( self ) if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device) if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None: encoder_seq_length = encoder_hidden_states.shape[1] encoder_attention_mask = torch.ones( batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long ) # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device) if self.is_decoder and encoder_attention_mask is not None: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) encoder_head_mask = self.get_head_mask(encoder_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None position_bias = None encoder_decoder_position_bias = None hidden_states = self.dropout(inputs_embeds) for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)): layer_head_mask = head_mask[i] encoder_layer_head_mask = encoder_head_mask[i] # Model parallel if self.model_parallel: torch.cuda.set_device(hidden_states.device) # Ensure that attention_mask is always on the same device as hidden_states if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if position_bias is not None: position_bias = position_bias.to(hidden_states.device) if encoder_hidden_states is not None: encoder_hidden_states = encoder_hidden_states.to(hidden_states.device) if encoder_extended_attention_mask is not None: encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device) if encoder_decoder_position_bias is not None: encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device) if layer_head_mask is not None: layer_head_mask = layer_head_mask.to(hidden_states.device) if encoder_layer_head_mask is not None: encoder_layer_head_mask = encoder_layer_head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask=extended_attention_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, encoder_layer_head_mask=encoder_layer_head_mask, past_key_value=past_key_value, use_cache=use_cache, output_attentions=output_attentions, ) # layer_outputs is a tuple with: # 
hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias) hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention weights), # (self-attention position bias), (cross-attention weights), (cross-attention position bias) position_bias = layer_outputs[2] if self.is_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: present_key_value_states = present_key_value_states + (present_key_value_state,) if output_attentions: all_attentions = all_attentions + (layer_outputs[3],) if self.is_decoder: all_cross_attentions = all_cross_attentions + (layer_outputs[5],) # Model Parallel: If it's the last layer for that device, put things on the next device if self.model_parallel: for k, v in self.device_map.items(): if i == v[-1] and "cuda:" + str(k) != self.last_device: hidden_states = hidden_states.to("cuda:" + str(k + 1)) hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) T5_START_DOCSTRING = r""" The T5 model was proposed in `Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer <https://arxiv.org/abs/1910.10683>`__ by Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu. It's an encoder decoder transformer pre-trained in a text-to-text denoising generative setting. This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.T5Config`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ T5_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using :class:`~transformers.T5Tokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for detail. `What are input IDs? 
<../glossary.html#input-ids>`__ To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training <./t5.html#training>`__. attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ T5 uses the :obj:`pad_token_id` as the starting token for :obj:`decoder_input_ids` generation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see :obj:`past_key_values`). To know more on how to prepare :obj:`decoder_input_ids` for pretraining take a look at `T5 Training <./t5.html#training>`__. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_input_ids` takes the value of :obj:`input_ids`. decoder_attention_mask (:obj:`torch.BoolTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will also be used by default. head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. decoder_head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. in the decoder Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`): Tuple consists of (:obj:`last_hidden_state`, :obj:`optional`: `hidden_states`, :obj:`optional`: `attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds` have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds` takes the value of :obj:`inputs_embeds`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ T5_ENCODER_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. T5 is a model with relative position embeddings so you should be able to pad the inputs on both the right and the left. Indices can be obtained using :class:`~transformers.T5Tokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for detail. To know more on how to prepare :obj:`input_ids` for pretraining take a look a `T5 Training <./t5.html#training>`__. attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. 
return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ # Warning messafe for FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. """ @add_start_docstrings( "The bare T5 Model transformer outputting raw hidden-states" "without any specific head on top.", T5_START_DOCSTRING, ) class T5Model(T5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", ] _keys_to_ignore_on_load_unexpected = [ r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight", ] def __init__(self, config: T5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) self.init_weights() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Example:: >>> from transformers import T5Tokenizer, T5Model >>> tokenizer = T5Tokenizer.from_pretrained('t5-small') >>> model = T5Model.from_pretrained('t5-small') >>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state """ use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, encoder_head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, 
decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings("""T5 Model with a `language modeling` head on top. """, T5_START_DOCSTRING) class T5ForConditionalGeneration(T5PreTrainedModel): _keys_to_ignore_on_load_missing = [ r"encoder\.embed_tokens\.weight", r"decoder\.embed_tokens\.weight", r"lm_head\.weight", ] _keys_to_ignore_on_load_unexpected = [ r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight", ] def __init__(self, config): super().__init__(config) self.model_dim = config.d_model self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) decoder_config = copy.deepcopy(config) decoder_config.is_decoder = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = T5Stack(decoder_config, self.shared) self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) self.init_weights() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.decoder.parallelize(self.device_map) self.lm_head = self.lm_head.to(self.decoder.first_device) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): self.encoder.deparallelize() self.decoder.deparallelize() self.encoder = self.encoder.to("cpu") self.decoder = self.decoder.to("cpu") self.lm_head = self.lm_head.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def get_output_embeddings(self): return self.lm_head def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to ``-100`` are ignored (masked), the loss is only computed for labels in ``[0, ..., config.vocab_size]`` Returns: Examples:: >>> from transformers import T5Tokenizer, T5ForConditionalGeneration >>> tokenizer = T5Tokenizer.from_pretrained('t5-small') >>> model = T5ForConditionalGeneration.from_pretrained('t5-small') >>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids >>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids >>> outputs = model(input_ids=input_ids, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits >>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you ", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model.generate(input_ids) """ use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask # Encode if needed (training, first prediction pass) if encoder_outputs is None: # Convert encoder inputs in embeddings if needed encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) hidden_states = encoder_outputs[0] if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # If decoding with past key value states, only the last tokens # should be given as an input if past_key_values is not None: assert labels is None, "Decoder should not use cached key value states when training." 
if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids[:, -1:] if decoder_inputs_embeds is not None: decoder_inputs_embeds = decoder_inputs_embeds[:, -1:] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.decoder.first_device) hidden_states = hidden_states.to(self.decoder.first_device) if decoder_input_ids is not None: decoder_input_ids = decoder_input_ids.to(self.decoder.first_device) if attention_mask is not None: attention_mask = attention_mask.to(self.decoder.first_device) if decoder_attention_mask is not None: decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device) # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, encoder_head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.encoder.first_device) self.lm_head = self.lm_head.to(self.encoder.first_device) sequence_output = sequence_output.to(self.lm_head.weight.device) if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim ** -0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666 if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return { "decoder_input_ids": input_ids, "past_key_values": past, "encoder_outputs": encoder_outputs, "attention_mask": attention_mask, "use_cache": use_cache, } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return self._shift_right(labels) def _reorder_cache(self, past, beam_idx): # if decoder past is not included in output # speedy decoding is disabled and no need to reorder if past is None: logger.warning("You might want to consider setting `use_cache=True` to speed up decoding") return past reordered_decoder_past = () for layer_past_states in past: # get the correct batch idx from layer past batch dim # batch dim of `past` is at 2nd position reordered_layer_past_states = () for layer_past_state in layer_past_states: # need to set correct `past` for each of the 
four key / value states reordered_layer_past_states = reordered_layer_past_states + ( layer_past_state.index_select(0, beam_idx), ) assert reordered_layer_past_states[0].shape == layer_past_states[0].shape assert len(reordered_layer_past_states) == len(layer_past_states) reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,) return reordered_decoder_past @add_start_docstrings( "The bare T5 Model transformer outputting encoder's raw hidden-states" "without any specific head on top.", T5_START_DOCSTRING, ) class T5EncoderModel(T5PreTrainedModel): authorized_missing_keys = [ r"encoder\.embed_tokens\.weight", ] def __init__(self, config: T5Config): super().__init__(config) self.shared = nn.Embedding(config.vocab_size, config.d_model) encoder_config = copy.deepcopy(config) encoder_config.use_cache = False encoder_config.is_encoder_decoder = False self.encoder = T5Stack(encoder_config, self.shared) self.init_weights() # Model parallel self.model_parallel = False self.device_map = None @add_start_docstrings(PARALLELIZE_DOCSTRING) def parallelize(self, device_map=None): self.device_map = ( get_device_map(len(self.encoder.block), range(torch.cuda.device_count())) if device_map is None else device_map ) assert_device_map(self.device_map, len(self.encoder.block)) self.encoder.parallelize(self.device_map) self.model_parallel = True @add_start_docstrings(DEPARALLELIZE_DOCSTRING) def deparallelize(self): self.encoder.deparallelize() self.encoder = self.encoder.to("cpu") self.model_parallel = False self.device_map = None torch.cuda.empty_cache() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) def get_encoder(self): return self.encoder def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Example:: >>> from transformers import T5Tokenizer, T5EncoderModel >>> tokenizer = T5Tokenizer.from_pretrained('t5-small') >>> model = T5EncoderModel.from_pretrained('t5-small') >>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1 >>> outputs = model(input_ids=input_ids) >>> last_hidden_states = outputs.last_hidden_state """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return encoder_outputs
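The cache handling above (`prepare_inputs_for_generation` slicing decoder inputs to the last token, and `_reorder_cache` permuting past key/value states) is exercised automatically by `generate()` when beam search and caching are enabled. A minimal sketch, assuming a standard `transformers` installation with access to the `t5-small` checkpoint:

```python
from transformers import T5Tokenizer, T5ForConditionalGeneration

tokenizer = T5Tokenizer.from_pretrained("t5-small")
model = T5ForConditionalGeneration.from_pretrained("t5-small")

input_ids = tokenizer("translate English to German: The house is wonderful.",
                      return_tensors="pt").input_ids
# Beam search with caching: at each step the decoder receives only the last
# token, and the past key/value states are reordered per selected beam.
outputs = model.generate(input_ids, num_beams=4, use_cache=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```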
PyTorch/SpeechRecognition/QuartzNet/common
common
sampler
import torch
import numpy as np
from torch.utils.data.sampler import Sampler


class DistributedSampler(Sampler):
    def __init__(self, dataset, batch_size, world_size, rank):
        """
        Constructor for the DistributedSampler.
        :param dataset: dataset
        :param batch_size: local batch size
        :param world_size: number of distributed workers
        :param rank: rank of the current process
        """
        self.dataset = dataset
        self.world_size = world_size
        self.rank = rank
        self.epoch = 0

        self.batch_size = batch_size
        self.global_batch_size = batch_size * world_size

        self.data_len = len(self.dataset)

        self.num_samples = self.data_len // self.global_batch_size \
            * self.global_batch_size

    def distribute_batches(self, indices):
        """
        Assigns batches to workers.
        Consecutive ranks are getting consecutive batches.
        :param indices: torch.tensor with batch indices
        """
        assert len(indices) == self.num_samples

        indices = indices.view(-1, self.batch_size)
        indices = indices[self.rank::self.world_size].contiguous()
        indices = indices.view(-1)
        indices = indices.tolist()

        assert len(indices) == self.num_samples // self.world_size
        return indices

    def reshuffle_batches(self, indices, rng):
        """
        Permutes global batches
        :param indices: torch.tensor with batch indices
        :param rng: instance of torch.Generator
        """
        indices = indices.view(-1, self.global_batch_size)
        num_batches = indices.shape[0]
        order = torch.randperm(num_batches, generator=rng)
        indices = indices[order, :]
        indices = indices.view(-1)
        return indices

    def __iter__(self):
        g = torch.Generator()
        g.manual_seed(self.epoch)
        # generate permutation
        indices = torch.randperm(self.data_len, generator=g)

        # make indices evenly divisible by (batch_size * world_size)
        indices = indices[:self.num_samples]

        # assign batches to workers
        indices = self.distribute_batches(indices)
        return iter(indices)

    def set_epoch(self, epoch):
        """
        Sets current epoch index.
        Epoch index is used to seed RNG in __iter__() function.
        :param epoch: index of current epoch
        """
        self.epoch = epoch

    def __len__(self):
        return self.num_samples // self.world_size


class BucketingSampler(DistributedSampler):
    def __init__(self, dataset, batch_size, num_buckets, world_size, rank):
        """
        Bucketing sampler with approx. equally-sized buckets.
        :param dataset: dataset
        :param batch_size: local batch size
        :param num_buckets: number of buckets
        :param world_size: number of distributed workers
        :param rank: rank of the current process
        """
        super().__init__(dataset, batch_size, world_size, rank)

        self.num_buckets = num_buckets
        len_ids = np.argsort([sample['duration']
                              for sample in dataset.samples])
        self.buckets = [torch.from_numpy(t)
                        for t in np.array_split(len_ids, num_buckets)]

    def __iter__(self):
        g = torch.Generator()
        g.manual_seed(self.epoch)
        global_bsz = self.global_batch_size

        indices = []
        for bid in range(self.num_buckets):
            # random shuffle within current bucket
            perm = torch.randperm(len(self.buckets[bid]), generator=g)
            bucket_indices = self.buckets[bid][perm]

            # add samples from current bucket to indices for current epoch
            indices.append(bucket_indices)

        indices = torch.cat(indices)

        # make indices evenly divisible by global batch size
        length = len(indices) // global_bsz * global_bsz
        indices = indices[:length]

        assert len(indices) % self.global_batch_size == 0

        # perform global reshuffle of all global batches
        indices = self.reshuffle_batches(indices, g)
        # distribute batches to individual workers
        indices = self.distribute_batches(indices)
        return iter(indices)
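The sampler above is consumed by a standard `torch.utils.data.DataLoader`. Below is a minimal, single-process sketch (the toy dataset, `world_size=1`, and `rank=0` are assumptions for illustration; the real training scripts take these from the command line and the distributed runtime). It shows how `BucketingSampler` groups utterances of similar duration before distributing indices to workers.

```python
import torch
from torch.utils.data import DataLoader, Dataset


class ToyAudioDataset(Dataset):
    """Hypothetical dataset exposing `samples` with per-utterance durations."""

    def __init__(self, durations):
        self.samples = [{'duration': d} for d in durations]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        # real datasets return (audio, transcript, ...); an index is enough here
        return idx


dataset = ToyAudioDataset(durations=[float(i % 17) for i in range(64)])
sampler = BucketingSampler(dataset, batch_size=8, num_buckets=4,
                           world_size=1, rank=0)
sampler.set_epoch(0)  # reseeds the per-epoch shuffle
loader = DataLoader(dataset, batch_size=8, sampler=sampler, drop_last=True)
for batch in loader:
    pass  # each batch draws indices from duration-sorted buckets
```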
PyTorch/Classification/ConvNets/triton/deployment_toolkit/library
library
onnx
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from pathlib import Path from typing import Dict, Optional, Union import numpy as np # pytype: disable=import-error import onnx import onnx.optimizer import onnx.shape_inference import onnxruntime from google.protobuf import text_format from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE # pytype: enable=import-error from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec from ..extensions import loaders, runners, savers from .utils import infer_precision LOGGER = logging.getLogger(__name__) def _value_info2tensor_spec(value_info: onnx.ValueInfoProto): onnx_data_type_map = {"float": "float32", "double": "float64"} elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower() dtype = onnx_data_type_map.get(elem_type_name, elem_type_name) def _get_dim(dim): which = dim.WhichOneof("value") if which is not None: # which is None when dim is None dim = getattr(dim, which) return None if isinstance(dim, (str, bytes)) else dim shape = value_info.type.tensor_type.shape shape = tuple([_get_dim(d) for d in shape.dim]) return TensorSpec(value_info.name, dtype=dtype, shape=shape) def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]: import networkx as nx # build directed graph nx_graph = nx.DiGraph() def _get_dtype(vi): t = vi.type if hasattr(t, "tensor_type"): type_id = t.tensor_type.elem_type else: raise NotImplementedError("Not implemented yet") return TENSOR_TYPE_TO_NP_TYPE[type_id] node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info} node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output} node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input} for node in onnx_graph.node: node_dtype = node_output2type.get("+".join(node.output), None) nx_graph.add_node( node.name, op=node.op_type, attr={a.name: a for a in node.attribute}, dtype=node_dtype, ) for input_name in node.input: prev_node = node_outputs2node.get(input_name, None) if prev_node: nx_graph.add_edge(prev_node.name, node.name) for input_node in onnx_graph.input: input_name = input_node.name nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node)) next_node = node_inputs2node.get(input_name, None) if next_node: nx_graph.add_edge(input_name, next_node.name) for output in onnx_graph.output: output_name = output.name nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output)) prev_node = node_outputs2node.get(output_name, None) if prev_node: nx_graph.add_edge(prev_node.name, output_name) else: LOGGER.warning(f"Could not find previous node for {output_name}") input_names = [n.name for n in onnx_graph.input] output_names = [n.name for n in onnx_graph.output] most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None)) if most_common_dtype is not None: 
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype] else: precision = None return precision class OnnxLoader(BaseLoader): def load(self, model_path: Union[str, Path], **_) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() model = onnx.load(model_path) onnx.checker.check_model(model) onnx.helper.strip_doc_string(model) model = onnx.shape_inference.infer_shapes(model) # TODO: probably modification of onnx model ios causes error on optimize # from onnx.utils import polish_model # model = polish_model(model) # run checker, docs strip, optimizer and shape inference inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input} outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output} precision = _infer_graph_precision(model.graph) return Model(model, precision, inputs, outputs) class OnnxSaver(BaseSaver): def __init__(self, as_text: bool = False): self._as_text = as_text def save(self, model: Model, model_path: Union[str, Path]) -> None: model_path = Path(model_path) LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}") model_path.parent.mkdir(parents=True, exist_ok=True) onnx_model: onnx.ModelProto = model.handle if self._as_text: with model_path.open("w") as f: f.write(text_format.MessageToString(onnx_model)) else: with model_path.open("wb") as f: f.write(onnx_model.SerializeToString()) """ ExecutionProviders on onnxruntime 1.4.0 ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'MIGraphXExecutionProvider', 'NGRAPHExecutionProvider', 'OpenVINOExecutionProvider', 'DnnlExecutionProvider', 'NupharExecutionProvider', 'VitisAIExecutionProvider', 'ArmNNExecutionProvider', 'ACLExecutionProvider', 'CPUExecutionProvider'] """ def _check_providers(providers): providers = providers or [] if not isinstance(providers, (list, tuple)): providers = [providers] available_providers = onnxruntime.get_available_providers() unavailable = set(providers) - set(available_providers) if unavailable: raise RuntimeError(f"Unavailable providers {unavailable}") return providers class OnnxRunner(BaseRunner): def __init__(self, verbose_runtime_logs: bool = False): self._providers = None self._verbose_runtime_logs = verbose_runtime_logs def init_inference(self, model: Model): assert isinstance(model.handle, onnx.ModelProto) return OnnxRunnerSession( model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs ) class OnnxRunnerSession(BaseRunnerSession): def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False): super().__init__(model) self._input_names = None self._output_names = None self._session = None self._providers = providers self._verbose_runtime_logs = verbose_runtime_logs self._old_env_values = {} def __enter__(self): self._old_env_values = self._set_env_variables() sess_options = onnxruntime.SessionOptions() # default session options if self._verbose_runtime_logs: sess_options.log_severity_level = 0 sess_options.log_verbosity_level = 1 LOGGER.info( f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}" ) self._input_names = list(self._model.inputs) self._output_names = list(self._model.outputs) model_payload = self._model.handle.SerializeToString() self._session = onnxruntime.InferenceSession( model_payload, providers=self._providers, sess_options=sess_options ) return self def __exit__(self, exc_type, exc_value, traceback): self._input_names = None self._output_names = None self._session = 
None self._recover_env_variables(self._old_env_values) def __call__(self, x: Dict[str, object]): feed_dict = {k: x[k] for k in self._input_names} y_pred = self._session.run(self._output_names, feed_dict) y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.ONNX.value, OnnxLoader) runners.register_extension(Format.ONNX.value, OnnxRunner) savers.register_extension(Format.ONNX.value, OnnxSaver)
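A minimal usage sketch for the loader/runner pair above (the model path and the handling of dynamic dimensions are assumptions for illustration, not part of the toolkit):

```python
import numpy as np

loader = OnnxLoader()
model = loader.load("model.onnx")  # hypothetical path; returns Model(handle, precision, inputs, outputs)

runner = OnnxRunner(verbose_runtime_logs=False)
with runner.init_inference(model) as session:
    # Build a feed dict from the recorded input specs; dynamic (None) dims are
    # assumed to be a batch dimension of size 1 in this sketch.
    feed = {
        name: np.random.rand(*[dim or 1 for dim in spec.shape]).astype(spec.dtype)
        for name, spec in model.inputs.items()
    }
    outputs = session(feed)
    for name, array in outputs.items():
        print(name, array.shape)
```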
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection
object_detection
shape_utils
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utils used to manipulate tensor shapes.""" import tensorflow as tf def combined_static_and_dynamic_shape(tensor): """Returns a list containing static and dynamic values for the dimensions. Returns a list of static and dynamic values for shape dimensions. This is useful to preserve static shapes when available in reshape operation. Args: tensor: A tensor of any type. Returns: A list of size tensor.shape.ndims containing integers or a scalar tensor. """ static_tensor_shape = tensor.shape.as_list() dynamic_tensor_shape = tf.shape(input=tensor) combined_shape = [] for index, dim in enumerate(static_tensor_shape): if dim is not None: combined_shape.append(dim) else: combined_shape.append(dynamic_tensor_shape[index]) return combined_shape def pad_or_clip_nd(tensor, output_shape): """Pad or Clip given tensor to the output shape. Args: tensor: Input tensor to pad or clip. output_shape: A list of integers / scalar tensors (or None for dynamic dim) representing the size to pad or clip each dimension of the input tensor. Returns: Input tensor padded and clipped to the output shape. """ tensor_shape = tf.shape(input=tensor) clip_size = [ tf.where(tensor_shape[i] - shape > 0, shape, -1) if shape is not None else -1 for i, shape in enumerate(output_shape) ] clipped_tensor = tf.slice( tensor, begin=tf.zeros(len(clip_size), dtype=tf.int32), size=clip_size) # Pad tensor if the shape of clipped tensor is smaller than the expected # shape. clipped_tensor_shape = tf.shape(input=clipped_tensor) trailing_paddings = [ shape - clipped_tensor_shape[i] if shape is not None else 0 for i, shape in enumerate(output_shape) ] paddings = tf.stack( [ tf.zeros(len(trailing_paddings), dtype=tf.int32), trailing_paddings ], axis=1) padded_tensor = tf.pad(tensor=clipped_tensor, paddings=paddings) output_static_shape = [ dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape ] padded_tensor.set_shape(output_static_shape) return padded_tensor
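A minimal usage sketch of the helpers above (shapes are illustrative only): pad a variable number of boxes up to a fixed size, clip when there are too many, and read back a mixed static/dynamic shape.

```python
import tensorflow as tf

boxes = tf.random.uniform([7, 4])                 # 7 boxes, 4 coordinates each
padded = pad_or_clip_nd(boxes, [10, 4])           # zero-padded up to [10, 4]
clipped = pad_or_clip_nd(boxes, [5, 4])           # clipped down to [5, 4]

shape = combined_static_and_dynamic_shape(boxes)  # [7, 4] as Python ints here
print(padded.shape, clipped.shape, shape)
```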
TensorFlow2/Recommendation/SIM
SIM
README
# SIM For TensorFlow 2 This repository provides a script and recipe to train the SIM model to achieve state-of-the-art accuracy. The content of this repository is tested and maintained by NVIDIA. ## Table Of Contents - [Model overview](#model-overview) * [Model architecture](#model-architecture) * [Default configuration](#default-configuration) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [Enabling TF32](#enabling-tf32) * [BYO dataset functionality overview](#byo-dataset-functionality-overview) * [BYO dataset glossary](#byo-dataset-glossary) * [Dataset feature specification](#dataset-feature-specification) * [Data flow in NVIDIA Deep Learning Examples recommendation models](#data-flow-in-nvidia-deep-learning-examples-recommendation-models) * [Example of dataset feature specification](#example-of-dataset-feature-specification) * [BYO dataset functionality](#byo-dataset-functionality) * [Glossary](#glossary) - [Setup](#setup) * [Requirements](#requirements) - [Quick Start Guide](#quick-start-guide) - [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Parameters](#parameters) * [Command-line options](#command-line-options) * [Getting the data](#getting-the-data) * [Dataset guidelines](#dataset-guidelines) * [Prebatching](#prebatching) * [BYO dataset](#byo-dataset) * [Channel definitions and requirements](#channel-definitions-and-requirements) * [Training process](#training-process) * [Inference process](#inference-process) * [Log format](#log-format) * [Training log data](#training-log-data) * [Inference log data](#inference-log-data) - [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb) * [Training accuracy: NVIDIA DGX-1 (8x V100 32GB)](#training-accuracy-nvidia-dgx-1-8x-v100-32gb) * [Training stability test](#training-stability-test) * [Impact of mixed precision on training accuracy](#impact-of-mixed-precision-on-training-accuracy) * [Training accuracy plot](#training-accuracy-plot) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb) * [Training performance: NVIDIA DGX-2 (16x V100 32GB)](#training-performance-nvidia-dgx-2-16x-v100-32gb) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA DGX A100 (8x A100 80GB)](#inference-performance-nvidia-dgx-a100-8x-a100-80gb) * [Inference performance: NVIDIA DGX-2 (16x V100 32GB)](#inference-performance-nvidia-dgx-2-16x-v100-32gb) - [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview Search-based Interest Model (SIM) is a system for predicting user behavior given sequences of previous interactions. The model is based on [Search-based User Interest Modeling with Lifelong Sequential Behavior Data for Click-Through Rate Prediction](https://arxiv.org/abs/2006.05639) paper which reports that it has been deployed at [Alibaba](https://www.alibaba.com) in the display advertising system. 
This repository provides a reimplementation of the code base provided originally for the [SIM](https://github.com/tttwwy/sim) and [DIEN](https://github.com/mouna99/dien) models (the SIM model's inner component).
There are several differences between this and the original SIM model implementation.
First, this model is implemented in TensorFlow 2 using Python 3.8 instead of TensorFlow 1 in Python 2.7.
Second, this implementation utilizes the user dimension (identifiers), which makes it possible to train a personalized recommender system.
Finally, the training code uses data preprocessed to [TFRecord](https://www.tensorflow.org/tutorials/load_data/tfrecord) format, which improves data loading.
We also include the scripts necessary to preprocess the [Amazon Reviews dataset](https://snap.stanford.edu/data/web-Amazon.html) used in experiments.

The table below provides a fine-grained summary of the differences between this repository and the original implementation.

| Mode | Original implementation | This repository |
|----------------|--------------------------|-----------------|
| Python | 2.7 | 3.8 |
| Dataset size | 135K samples | 12M samples |
| Dataset format | CSV | TFRecord |
| Model | - user id feature not included <br> - batch normalization included but not used correctly <br> - two-dimensional softmax output <br> - hardcoded feature cardinalities <br>| - includes user id feature <br> - doesn't include batch normalization <br> - one-dimensional sigmoid output <br> - feature cardinalities deduced from the dataset|

In the authors' SIM implementation, the internals of the submodels differ slightly between the code and the original papers (DIN, DIEN, SIM). Our implementation core is based on the papers' modules. For exact implementation details, refer to the list below.

<details>
<summary><b> List of implementation differences between original SIM code and DIN/DIEN/SIM papers </b></summary>

- Batch normalization before the MLP is not included in the papers.
- Batch normalization in the code uses `trainable=False` during the training phase.
- ItemItemInteraction in DIN's attention module in the SIM implementation does not correspond to the activation unit from the DIN paper.
- Element-wise subtraction and multiplications are fed to the MLP, skipping the outer product operation.
- Sigmoids are used instead of PReLU/DICE in the MLP.
- The soft search MLP is missing a middle layer in the implementation.
- In the ESU part, multi-head attention is implemented as a DIN interaction block instead of a typical multi-head attention.
- The ESU part adds an additional embedding by summing all the embeddings passed from the GSU part.
- The DIEN auxiliary loss uses an auxiliary network instead of the sigmoid of concatenated embeddings from the DIEN paper.

</details>

&nbsp;

The model enables you to train a high-quality, personalized, sequential neural network-based recommender system.

This model is trained with mixed precision using Tensor Cores on NVIDIA Volta and NVIDIA Ampere GPU architectures. Therefore, researchers can get results 1.48x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.

### Model architecture

The SIM model consists of two components: the General Search Unit (GSU) and the Exact Search Unit (ESU).
The goal of the former is to filter a possibly long historical user behavior sequence down to a shorter, relevant sequence. The ESU, on the other hand, utilizes the most recent user behaviors together with a candidate item, for example, to estimate the click-through rate for a candidate ad.

Both parts are trained jointly using data on past user behaviors.

A model architecture diagram is presented below.

<p align="center">
  <img width="150%" src="./images/sim.png">
  <br>
Figure 1. The architecture of the model.
</p>

Embeddings in the model architecture diagram are obtained by passing each feature from the dataset through an embedding layer. Item features from the target item, the short behavior history, and the long behavior history share embedding tables.

<p align="center">
  <img src="./images/embedding.png">
  <br>
Figure 2. Embedding of input features.
</p>

### Default configuration

The following features are implemented in this model:
- general
    - dynamic loss scaling for Tensor Cores (mixed precision) training
    - data-parallel multi-GPU training
- preprocessing
    - dataset preprocessing using the [NVTabular](https://github.com/NVIDIA-Merlin/NVTabular) library

The following performance optimizations were implemented in this model:
- dataloader based on [TFRecords](https://www.tensorflow.org/tutorials/load_data/tfrecord)

### Feature support matrix

This model supports the following features:

| Feature | SIM v1.0 TF2 |
|----------------------------------------------------------------------------|--------------|
| Horovod Multi-GPU (NCCL) | Yes |
| Accelerated Linear Algebra (XLA) | Yes |
| Automatic mixed precision (AMP) | Yes |
| Preprocessing on GPU with NVTabular | Yes |
| BYO dataset | Yes |

#### Features

**Multi-GPU training with Horovod**

Our model uses Horovod to implement efficient multi-GPU training with NCCL. For details, refer to the example sources in this repository or the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage).

**Accelerated Linear Algebra (XLA)**

XLA is a domain-specific compiler for linear algebra that can accelerate TensorFlow models with potentially no source code changes. Enabling XLA results in improvements to speed and memory usage: most internal benchmarks run ~1.1-1.5x faster after XLA is enabled.

**Automatic Mixed Precision (AMP)**

AMP enables mixed precision training without any changes to the code base by performing automatic graph rewrites and loss scaling controlled by an environment variable.

**Preprocessing on GPU with NVTabular**

Amazon Reviews dataset preprocessing can be conducted using [NVTabular](https://github.com/NVIDIA-Merlin/NVTabular). For more information on the framework, refer to [this blog post](https://developer.nvidia.com/blog/announcing-the-nvtabular-open-beta-with-multi-gpu-support-and-new-data-loaders).

### Mixed precision training

Mixed precision is the combined use of different numerical precisions in a computational method.
[Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in the NVIDIA Volta and NVIDIA Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures.
Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on NVIDIA Volta, and NVIDIA Ampere GPU architectures automatically. The TensorFlow framework code makes all necessary model changes internally. In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing `tf.contrib` loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling. For information about: - How to train using mixed precision, refer to the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, refer to the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. - How to access and enable AMP for TensorFlow, refer to [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide. #### Enabling mixed precision To enable SIM training to use mixed precision, use `--amp` flag for the training script. Refer to the [Quick Start Guide](#quick-start-guide) for more information. #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on NVIDIA Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require a high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ### BYO dataset functionality overview This section describes how you can train the DeepLearningExamples RecSys models on your own datasets without changing the model or data loader and with similar performance to the one published in each repository. This can be achieved thanks to Dataset Feature Specification, which describes how the dataset, data loader, and model interact with each other during training, inference, and evaluation. 
Dataset Feature Specification has a consistent format across all recommendation models in NVIDIA’s DeepLearningExamples repository, regardless of dataset file type and the data loader, giving you the flexibility to train RecSys models on your own datasets. - [BYO dataset glossary](#byo-dataset-glossary) - [Dataset Feature Specification](#dataset-feature-specification) - [Data Flow in Recommendation Models in DeepLearning examples](#data-flow-in-nvidia-deep-learning-examples-recommendation-models) - [Example of Dataset Feature Specification](#example-of-dataset-feature-specification) - [BYO dataset functionality](#byo-dataset-functionality) #### BYO dataset glossary The Dataset Feature Specification consists of three mandatory and one optional section: <b>feature_spec </b> provides a base of features that may be referenced in other sections, along with their metadata. Format: dictionary (feature name) => (metadata name => metadata value)<br> <b>source_spec </b> provides information necessary to extract features from the files that store them. Format: dictionary (mapping name) => (list of chunks)<br> * <i>Mappings</i> are used to represent different versions of the dataset (think: train/validation/test, k-fold splits). A mapping is a list of chunks.<br> * <i>Chunks</i> are subsets of features that are grouped together for saving. For example, some formats may constrain data saved in one file to a single data type. In that case, each data type would correspond to at least one chunk. Another example where this might be used is to reduce file size and enable more parallel loading. Chunk description is a dictionary of three keys:<br> * <i>type</i> provides information about the format in which the data is stored. Not all formats are supported by all models.<br> * <i>features</i> is a list of features that are saved in a given chunk. The order of this list may matter: for some formats, it is crucial for assigning read data to the proper feature.<br> * <i>files</i> is a list of paths to files where the data is saved. For Feature Specification in yaml format, these paths are assumed to be relative to the yaml file’s directory (basename). <u>Order of this list matters:</u> It is assumed that rows 1 to i appear in the first file, rows i+1 to j in the next one, etc. <br> <b>channel_spec</b> determines how features are used. It is a mapping (channel name) => (list of feature names). Channels are model-specific magic constants. In general, data within a channel is processed using the same logic. Example channels: model output (labels), categorical ids, numerical inputs, user data, and item data. <b>metadata</b> is a catch-all, wildcard section: If there is some information about the saved dataset that does not fit into the other sections, you can store it here. #### Dataset feature specification Data flow can be described abstractly: Input data consists of a list of rows. Each row has the same number of columns; each column represents a feature. The columns are retrieved from the input files, loaded, aggregated into channels and supplied to the model/training script. FeatureSpec contains metadata to configure this process and can be divided into three parts: * Specification of how data is organized on disk (source_spec). It describes which feature (from feature_spec) is stored in which file and how files are organized on disk. * Specification of features (feature_spec). 
Describes a dictionary of features, where key is the feature name and values are the features’ characteristics such as dtype and other metadata (for example, cardinalities for categorical features) * Specification of model’s inputs and outputs (channel_spec). Describes a dictionary of model’s inputs where keys specify model channel’s names and values specify lists of features to be loaded into that channel. Model’s channels are groups of data streams to which common model logic is applied, for example categorical/continuous data, and user/item ids. Required/available channels depend on the model The FeatureSpec is a common form of description regardless of underlying dataset format, dataset data loader form, and model. #### Data flow in NVIDIA Deep Learning Examples recommendation models The typical data flow is as follows: * <b>S.0.</b> Original dataset is downloaded to a specific folder. * <b>S.1.</b> Original dataset is preprocessed into Intermediary Format. For each model, the preprocessing is done differently, using different tools. The Intermediary Format also varies (for example, for DLRM PyTorch, the Intermediary Format is a custom binary one.) * <b>S.2.</b> The Preprocessing Step outputs Intermediary Format with dataset split into training and validation/testing parts along with the Dataset Feature Specification yaml file. Metadata in the preprocessing step is automatically calculated. * <b>S.3.</b> Intermediary Format data, together with the Dataset Feature Specification, are fed into training/evaluation scripts. The data loader reads Intermediary Format and feeds the data into the model according to the description in the Dataset Feature Specification. * <b>S.4.</b> The model is trained and evaluated <p align="center"> <img src="./images/df_diagram.png" /> <br> Figure 3. Data flow in Recommender models in NVIDIA Deep Learning Examples repository. Channels of the model are drawn in green</a>. </p> #### Example of dataset feature specification As an example, let’s consider a Dataset Feature Specification for a small CSV dataset for some abstract model. ```yaml feature_spec: user_gender: dtype: torch.int8 cardinality: 3 #M,F,Other user_age: #treated as numeric value dtype: torch.int8 user_id: dtype: torch.int32 cardinality: 2655 item_id: dtype: torch.int32 cardinality: 856 label: dtype: torch.float32 source_spec: train: - type: csv features: - user_gender - user_age files: - train_data_0_0.csv - train_data_0_1.csv - type: csv features: - user_id - item_id - label files: - train_data_1.csv test: - type: csv features: - user_id - item_id - label - user_gender - user_age files: - test_data.csv channel_spec: numeric_inputs: - user_age categorical_user_inputs: - user_gender - user_id categorical_item_inputs: - item_id label_ch: - label ``` The data contains five features: (user_gender, user_age, user_id, item_id, label). Their data types and necessary metadata are described in the feature specification section. In the source mapping section, two mappings are provided: one describes the layout of the training data, and the other of the testing data. The layout for training data has been chosen arbitrarily to showcase the flexibility. The train mapping consists of two chunks. The first one contains user_gender and user_age, saved as a CSV, and is further broken down into two files. For specifics of the layout, refer to the following example and consult the glossary. The second chunk contains the remaining columns and is saved in a single file. 
Notice that the order of columns is different in the second chunk - this is alright, as long as the order matches the order in that file (that is, columns in the .csv are also switched).

Let's break down the train source mapping. The table contains example data color-paired to the files containing it.

<p align="center">
<img width="70%" src="./images/layout_example.png" />
</p>

The channel spec describes how the data will be consumed. Four streams will be produced and available to the script/model.
The feature specification does not specify what happens further: names of these streams are only lookup constants defined by the model/script.
Based on this example, we can speculate that the model has three input channels: numeric_inputs, categorical_user_inputs, categorical_item_inputs, and one output channel: label.
Feature names are internal to the FeatureSpec and can be freely modified.

#### BYO dataset functionality

In order to train any Recommendation model in NVIDIA Deep Learning Examples, one can follow one of three possible ways:
* One delivers preprocessed datasets in the Intermediary Format supported by the data loader used by the training script
(different models use different data loaders) together with a FeatureSpec yaml file describing at least the specification of the dataset, features, and model channels

* One uses a transcoding script (**not supported in SIM model yet**)

* One delivers datasets in non-preprocessed form and uses preprocessing scripts that are a part of the model repository.
In order to use already existing preprocessing scripts, the format of the dataset needs to match one of the original datasets.
This way, the FeatureSpec file will be generated automatically, but the user will have the same preprocessing as in the original model repository.

### Glossary

**Auxiliary loss** is used to improve DIEN (and so SIM as well) model training. It is constructed based on consecutive user actions from their short behavior history.

**DIEN model** was proposed in the [Deep Interest Evolution Network for Click-Through Rate Prediction](https://arxiv.org/abs/1809.03672) paper as an extension of the DIN model. It can also be used as a backbone for processing short interaction sequences in the SIM model.

**DIN model** was proposed in the [Deep Interest Network for Click-Through Rate Prediction](https://arxiv.org/abs/1706.06978) paper. It can be used as a backbone for processing short interaction sequences in the SIM model.

**Long user behavior history** is the record of past user interactions. These are processed by the General Search Unit part of the SIM model (refer to Figure 1), which is typically a lightweight model aimed at processing longer sequences.

**Short user behavior history** is the record of the most recent user interactions. These are processed by the more computationally intensive Exact Search Unit part of the SIM model (refer to Figure 1).

**User behaviors** are users' interactions with given items of interest. Example interactions include reviewed items for the Amazon Reviews dataset or clicks in the e-commerce domain. All the systems contained in this repository focus on modeling user interactions.

## Setup

The following section lists the requirements that you need to meet in order to start training the SIM model.

### Requirements

This repository contains a Dockerfile that extends the TensorFlow2 NGC container and encapsulates some dependencies.
Aside from these dependencies, ensure you have the following components: - [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) - [TensorFlow2 22.01-py3](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow/tags) NGC container - Supported GPUs: - [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, refer to the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: - [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) - [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry) For those unable to use the Tensorflow2 NGC container, to set up the required environment or create your own container, refer to the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). ## Quick Start Guide To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the SIM model on the Amazon Reviews dataset. For the specifics concerning training and inference, refer to the [Advanced](#advanced) section. 1. Clone the repository. ```bash git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/TensorFlow2/Recommendation/SIM ``` 2. Build the SIM Tensorflow2 container. ```bash docker build -t sim_tf2 . ``` 3. Start an interactive session in the NGC container to run preprocessing, training, or inference (Amazon Books dataset can be mounted if it has already been downloaded, otherwise, refer to point 4). The SIM TensorFlow2 container can be launched with: ```bash docker run --runtime=nvidia -it --rm --ipc=host --security-opt seccomp=unconfined -v ${AMAZON_DATASET_PATH}:${RAW_DATASET_PATH} sim_tf2 bash ``` 4. (Optional) Download Amazon Books dataset: ```bash scripts/download_amazon_books_2014.sh export RAW_DATASET_PATH=/data/amazon_books_2014 ``` 5. Start preprocessing. For details of the required file format and certain preprocessing parameters refer to [BYO dataset](#byo-dataset). ```bash python preprocessing/sim_preprocessing.py \ --amazon_dataset_path ${RAW_DATASET_PATH} \ --output_path ${PARQUET_PATH} python preprocessing/parquet_to_tfrecord.py \ --amazon_dataset_path ${PARQUET_PATH} \ --tfrecord_output_dir ${TF_RECORD_PATH} ``` 6. Start training (`${GPU}` is an arbitrary number of GPUs to be used). ```bash mpiexec --allow-run-as-root --bind-to socket -np ${GPU} python main.py \ --dataset_dir ${TF_RECORD_PATH} \ --mode train \ --model_type sim \ --embedding_dim 16 \ --drop_remainder \ --optimizer adam \ --lr 0.01 \ --epochs 3 \ --global_batch_size 131072 \ --amp ``` 7. Start inference. ```bash mpiexec --allow-run-as-root --bind-to socket -np ${GPU} python main.py \ --dataset_dir ${TF_RECORD_PATH} \ --mode inference \ --model_type sim \ --embedding_dim 16 \ --global_batch_size 131072 \ --amp ``` For the explanation of output logs, refer to [Log format](#log-format) section. Now that you have your model trained and evaluated, you can choose to compare your training results with our [Training accuracy results](#training-accuracy-results). 
You can also choose to benchmark your performance to [Training performance benchmark](#training-performance-results), or [Inference performance benchmark](#inference-performance-results). Following the steps in these sections will ensure that you achieve the same accuracy and performance results as stated in the [Results](#results) section. ## Advanced The following sections provide greater details of the dataset, running training and inference, and the training results. ### Scripts and sample code The `main.py` script provides an entry point to all the provided functionalities. This includes running training and inference modes. The behavior of the script is controlled by command-line arguments listed below in the [Parameters](#parameters) section. The `preprocessing` folder contains scripts to prepare data. In particular, the `preprocessing/sim_preprocessing.py` script can be used to preprocess the Amazon Reviews dataset while `preprocessing/parquet_to_tfrecord.py` transforms parquet files to TFRecords for loading data efficiently. Models are implemented in corresponding modules in the `sim/models` subdirectory, for example, `sim/models/sim_model.py` for the SIM model. The `sim/layers` module contains definitions of different building blocks for the models. Finally, the `sim/data` subdirectory provides modules for the dataloader. Other useful utilities are contained in the `sim/utils` module. ### Parameters The `main.py` script parameters are detailed in the following table. | Scope | Parameter | Description | Default Value | |-----------------|---------------------------|-------------------------------------------------------------------------|---------------------------| | model | model_type | Model type | sim | | model | embedding_dim | Embedding dimension for different entities (users, items & categories) | 16 | | model | stage_one_mlp_dims | MLP hidden dimensions for the stage one component | 200 | | model | stage_two_mlp_dims | MLP hidden dimensions for the stage two component | 200,80 | | model | aux_mlp_dims | MLP hidden dimensions for the auxiliary loss | 100,50 | | datasets | dataset_dir | Path to the directory containing feature specification file and dataset splits | -- | | datasets | feature_spec | Name of the feature spec file in the dataset directory | feature_spec.yaml | | training | optimizer | Optimizer to use | adam | | training | lr | Learning rate for the optimizer selected | 0.01 | | training | weight_decay | Parameters decay of the optimizer selected | 0 | | training | epochs | Train for the following number of epochs | 3 | | training | global_batch_size | Batch size used for training, evaluation and inference | 131072 | | training | dropout_rate | Dropout rate for all the classification MLPs | -1 (disabled) | | training | amp | Enable automatic mixed precision training (flag) | False | | training | xla | Enable XLA conversion (flag) | False | | training | drop_remainder | Drop remainder batch for training set (flag) | False | | training | disable_cache | Disable dataset caching after the first time it is iterated over (flag) | False | | training | repeat_count | Repeat training dataset this number of times | 0 | | training | prefetch_train_size | Number of batches to prefetch in training. | 10 | | training | prefetch_test_size | Number of batches to prefetch in evaluation. 
| 2 | | training | long_seq_length | Determines the long history - short history split of history features | 90 | | training | prebatch_train_size | Batch size of batching applied during preprocessing to train dataset. | 0 | | training | prebatch_test_size | Batch size of batching applied during preprocessing to test dataset. | 0 | | results | results_dir | Path to the model result files storage | /tmp/sim | | results | log_filename | Name of the file to store logger output | log.json | | results | save_checkpoint_path | Directory to save model checkpoints | "" | | results | load_checkpoint_path | Directory to restore model checkpoints from | "" | | run mode | mode | One of: train, inference. | train | | run mode | benchmark | Perform benchmark measurements for, e.g., throughput calculation (flag) | False | | run mode | benchmark_warmup_steps | Number of warmup steps to use for performance benchmarking | 20 | | run mode | benchmark_steps | Number of steps to use for performance benchmarking | 200 | | run mode | affinity | Type of CPU affinity | socket_unique_interleaved | | run mode | inter_op_parallelism | Number of inter op threads | 0 | | run mode | intra_op_parallelism | Number of intra op threads | 0 | | run mode | num_parallel_calls | Parallelism level for tf.data API. If None, heuristic based on number of CPUs and number of GPUs will be used | None | | reproducibility | seed | Random seed | -1 | ### Command-line options To view the full list of available options and their descriptions, use the `--help` command-line option, for example: ```bash python main.py --help ``` ### Getting the data The SIM model was trained on the Books department subset of [Amazon Reviews](https://snap.stanford.edu/data/web-Amazon.html) dataset. The dataset is split into two parts: training and test data. The test set for evaluation was constructed using the last user interaction from user behavior sequences. All the preceding interactions are used for training. This repository contains the `scripts/download_amazon_books_2014.sh`, which can be used to download the dataset. #### Dataset guidelines The preprocessing steps applied to the raw data include: - Sampling negatives randomly (out of all possible items) - Choosing the last category as the item category (in case more than one is available) - Determining embedding table sizes for categorical features needed to construct a model - Filter users for training split based on their number of interactions (discard users with less than 20 interactions) #### Prebatching Preprocessing scripts allow to apply batching prior to the model`s dataloader. This reduces the size of produced TFrecord files and speeds up dataloading. To do so, specify `--prebatch_train_size` and `--prebatch_test_size` while converting data using `scripts/parquet_to_tfrecord.py`. Later, while using the `main.py` script, pass the information about applied prebatch size via the same parameters. Example Start preprocessing from step 5. 
[Quick Start Guide](#quick-start-guide):

```bash
python preprocessing/sim_preprocessing.py \
    --amazon_dataset_path ${RAW_DATASET_PATH} \
    --output_path ${PARQUET_PATH}

python preprocessing/parquet_to_tfrecord.py \
    --amazon_dataset_path ${PARQUET_PATH} \
    --tfrecord_output_dir ${TF_RECORD_PATH} \
    --prebatch_train_size ${PREBATCH_TRAIN_SIZE} \
    --prebatch_test_size ${PREBATCH_TEST_SIZE}
```

Then train the model (step 6):

```bash
mpiexec --allow-run-as-root --bind-to socket -np ${GPU} python main.py \
    --dataset_dir ${TF_RECORD_PATH} \
    --mode train \
    --model_type sim \
    --embedding_dim 16 \
    --drop_remainder \
    --optimizer adam \
    --lr 0.01 \
    --epochs 3 \
    --global_batch_size 131072 \
    --amp \
    --prebatch_train_size ${PREBATCH_TRAIN_SIZE} \
    --prebatch_test_size ${PREBATCH_TEST_SIZE}
```

<details>
<summary><b>Prebatching details</b></summary>

- The last batch of each split will be saved to a separate file, `remainder.tfrecord`, unless there are enough samples to form a full batch.
- The final batch size used in the main script can be a multiple of the prebatch size.
- The final batch size used in the main script can be a divisor of the prebatch size. In this case, when using multi-GPU training, the number of batches received by each worker can be greater than 1, resulting in an error during the allgather operation. Dataset size, batch size, and prebatch size have to be chosen with that limitation in mind.
- For the original Amazon Books dataset, parameters were set to PREBATCH_TRAIN_SIZE = PREBATCH_TEST_SIZE = 4096 for performance benchmarking purposes.

</details>

&nbsp;

#### BYO dataset

This implementation supports using other datasets thanks to BYO dataset functionality. BYO dataset functionality allows users to plug in their dataset in a common fashion for all Recommender models that support this functionality. Using BYO dataset functionality, the user does not have to modify the source code of the model thanks to the Feature Specification file. For general information on how the BYO dataset works, refer to the [BYO dataset overview section](#byo-dataset-functionality-overview). For usage of the preprocessing scripts, refer to the [Quick Start Guide](#quick-start-guide).

There are currently two ways to plug in the user's dataset:

<details>
<summary><b>1. Provide a preprocessed dataset in parquet format, then use the parquet_to_tfrecord.py script to convert it to the Intermediary Format and automatically generate the FeatureSpec.</b></summary>

The parquet $DATASET needs to have the following directory structure (or change the names with script arguments):

```
DATASET:
  metadata.json
  test:
    part.0.parquet
    part.1.parquet
    .
    .
    .
  train:
    part.0.parquet
    part.1.parquet
    .
    .
    .
```

`metadata.json` should contain the cardinality of each categorical feature present in the dataset and have the following structure (for features `uid`, `item`, `cat`):

```json
{
  "cardinalities": [
    {"name": "uid", "value": 105925},
    {"name": "item", "value": 1209081},
    {"name": "cat", "value": 2330}
  ]
}
```

Make sure the dataset's columns are in the same order as the entries in `metadata.json` (for user features and **item features in each channel**).

Columns of the parquet files must be organized in a specific order:
- one column with `label` values
- `number_of_user_features` (to be specified in a script argument) columns follow, with each **user feature** in a separate column
- `number_of_item_features` columns. One column for each feature of the **target (query) item**
- `number_of_item_features` columns.
Column with index i contains **sequence of item_feature_{i}** of **positive_history** - `number_of_item_features` columns. Column with index i contains **sequence of item_feature_{i} **of **negative_history** </details> <details> <summary><b>2. Provide preprocessed dataset in tfrecord format with feature_spec.yaml describing the details. </b></summary> Required channels and sample layout can be found in the configuration shown below. This is the file layout and feature specification for the original Amazon dataset. Files layout: ``` TF_RECORD_PATH: feature_spec.yaml test.tfrecord train.tfrecord ``` feature_spec.yaml: ```yaml channel_spec: label: - label negative_history: - item_id_neg - cat_id_neg positive_history: - item_id_pos - cat_id_pos target_item_features: - item_id_trgt - cat_id_trgt user_features: - user_id feature_spec: item_id_neg: cardinality: 1209081 dimensions: - 100 dtype: int64 item_id_pos: cardinality: 1209081 dimensions: - 100 dtype: int64 item_id_trgt: cardinality: 1209081 dtype: int64 cat_id_neg: cardinality: 2330 dimensions: - 100 dtype: int64 cat_id_pos: cardinality: 2330 dimensions: - 100 dtype: int64 cat_id_trgt: cardinality: 2330 dtype: int64 label: dtype: bool user_id: cardinality: 105925 dtype: int64 metadata: {} source_spec: test: - features: &id001 - label - user_id - item_id_trgt - cat_id_trgt - item_id_pos - cat_id_pos - item_id_neg - cat_id_neg files: - test.tfrecord type: tfrecord train: - features: *id001 files: - train.tfrecord type: tfrecord ``` `dimensions` should contain the length of the sequencial features. Note that corresponsive features in `negative_history`, `positive_history`, `target_item_features` need to be listed in the same order in channel spec in each channel since they share embedding tables in the model. (for example `item_id` needs to be first and `cat_id` second). </details> &nbsp; ##### Channel definitions and requirements This model defines five channels: - label, accepting a single feature - negative_history, accepting a categorical ragged tensor for an arbitrary number of features - positive_history, accepting a categorical ragged tensor for an arbitrary number of features - target_item_features, accepting an arbitrary number of categorical features - user_features, accepting an arbitrary number of categorical features Features in `negative_history`, `positive_history` and `target_item_features` channels must be equal in number and must be defined in the same order in channel spec. The training script expects two mappings: - train - test For performance reasons, the only supported dataset type is tfrecord. ### Training process Training can be run using `main.py` script by specifying the `--mode train` parameter. The speed of training is measured by throughput, that is, the number of samples processed per second. Evaluation is based on the [Area under ROC Curve (ROC AUC)](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) metric. Model checkpoints may be stored using Checkpoint manager via the `--save_checkpoint_path` and `--load_checkpoint_path` parameters. Training and inference logs are saved to a directory specified via the `--results_dir` parameter. Mixed precision training is supported via the `--amp` flag. Multi-GPU training is performed using mpiexec and Horovod libraries. ### Inference process Inference can be run using `main.py` script by specifying the `--mode inference` parameter. It is performed using a dummy model initialized randomly, and it is intended to measure inference throughput. 
The most important parameter for inference is the batch size. Example usage of training and inference is demonstrated in the [Quick Start Guide](#quick-start-guide).

### Log format

There are three types of log lines during model execution. Each of them has a `step` value; however, it is formatted differently depending on the type of log:
- <b>step log</b> - the step value is in the format `[epoch, step]`:
DLLL {"timestamp": ..., "datetime": ..., "elapsedtime": ..., "type": ..., `"step": [2, 79]`, "data": ...}
- <b>end of epoch log</b> - the step value is in the format `[epoch]`:
DLLL {"timestamp": ..., "datetime": ..., "elapsedtime": ..., "type": ..., `"step": [2]`, "data": ...}
- <b>summary log</b> - logged once at the end of script execution. The step value is in the format `[]`:
DLLL {"timestamp": ..., "datetime": ..., "elapsedtime": ..., "type": ..., `"step": []`, "data": ...}

In these logs, the `data` field contains a dictionary in the form `{metric: value}`. The metrics logged differ based on the log type (step, end of epoch, summary) and the model mode (training, inference).

#### Training log data

- <b> step log </b>
  - classification_loss - loss at the final output of the model.
  - dien_aux_loss - loss at the output of the auxiliary model.
  - total_loss - sum of the above.
  - samples/s - estimated throughput in samples per second.
- <b> end of epoch log </b>
  - throughput - average throughput during the epoch in samples/s.
  - time - epoch time in seconds.
  - train_auc - AUC during evaluation on the train set.
  - test_auc - AUC during evaluation on the test set.
  - train_loss - loss during evaluation on the train set.
  - test_loss - loss during evaluation on the test set.
  - latency_[mean, p90, p95, p99] - latencies in milliseconds.
- <b> summary log </b>
  - time_to_train - total training time in seconds.
  - train_auc, test_auc, train_loss, test_loss - results from the last epoch (see above).

#### Inference log data

- <b> step log </b>
  - samples/s - estimated throughput in samples per second.
- <b> end of epoch log is not present</b>
- <b> summary log </b>
  - throughput - average throughput during the run in samples/s.
  - time - total execution time in seconds.
  - latency_[mean, p90, p95, p99] - latencies in milliseconds.

## Performance

The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA's latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).

### Benchmarking

The following section shows how to run benchmarks measuring the model performance in training and inference modes.
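Both benchmark modes write DLLogger output in the format described in the [Log format](#log-format) section above. As an illustration only (the helper below is not part of this repository), the final summary entry can be extracted from the log file with a few lines of Python; the path assumes the default `results_dir` and `log_filename` values:

```python
import json


def read_summary(log_path="/tmp/sim/log.json"):
    """Return the `data` dict of the last summary record (the one with `step == []`)."""
    summary = {}
    with open(log_path) as log_file:
        for line in log_file:
            if not line.startswith("DLLL "):
                continue  # skip anything that is not a DLLogger record
            entry = json.loads(line[len("DLLL "):])
            if entry.get("step") == []:  # summary records use an empty step list
                summary = entry.get("data", {})
    return summary


# For inference benchmarks this typically contains `throughput`, `time`, and latency percentiles.
print(read_summary())
```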
#### Training performance benchmark To benchmark the training performance on a specific batch size, run: ```bash mpiexec --allow-run-as-root --bind-to socket -np ${GPU} python main.py \ --dataset_dir ${TF_RECORD_PATH} \ --mode train \ --model_type sim \ --global_batch_size 131072 \ --drop_remainder \ --amp \ --benchmark \ --prebatch_train_size ${PREBATCH_TRAIN_SIZE} \ --prebatch_test_size ${PREBATCH_TEST_SIZE} ``` Equivalent: ```bash scripts/run_model.sh \ --data_path ${TF_RECORD_PATH} \ --gpus ${GPU} \ --amp 1 \ --benchmark 1 \ --prebatch_train_size ${PREBATCH_TRAIN_SIZE} \ --prebatch_test_size ${PREBATCH_TEST_SIZE} ``` #### Inference performance benchmark To benchmark the inference performance on a specific batch size, run: ```bash mpiexec --allow-run-as-root --bind-to socket -np ${GPU} python main.py \ --dataset_dir ${TF_RECORD_PATH} \ --mode inference \ --model_type sim \ --global_batch_size 131072 \ --amp \ --benchmark \ --prebatch_train_size ${PREBATCH_TRAIN_SIZE} \ --prebatch_test_size ${PREBATCH_TEST_SIZE} ``` Equivalent: ```bash scripts/run_model.sh \ --data_path ${TF_RECORD_PATH} \ --gpus ${GPU} \ --amp 1 \ --benchmark 1 \ --prebatch_train_size ${PREBATCH_TRAIN_SIZE} \ --prebatch_test_size ${PREBATCH_TEST_SIZE} ``` ### Results The following sections provide details on how we achieved our performance and accuracy in training and inference. #### Training accuracy results Our results were obtained by running the `run_model.sh` bash script in the TensorFlow2 21.10-py3 NGC container. Experiments were run on 1 and 8 GPUs, with FP32/TF32 Precision and AMP and with XLA-OFF/XLA-ON. Dataset was prebatched with the size of 16384. Other parameters were set to defaults. There were 10 runs for each configuration. In the `Training accuracy` sections, average values are reported. In the `Training stability` sections, values from all runs are included in plots. ##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB) |GPUs | XLA |Time to train - TF32 (seconds) | Time to train - mixed precision (seconds) | AUC - TF32 | AUC - mixed precision | Time to train speedup (TF32 to mixed precision) | |-----|----------|-------|--------|---------|-------------|---------------| |1 |XLA-OFF |133.62 | 109.29 |0.82 |0.811 | 1.22 | |1 |XLA-ON |132.31 | 113.91 |0.811 |0.822 | 1.16 | |8 |XLA-OFF |35.17 | 34.08 |0.813 |0.808 | 1.03 | |8 |XLA-ON |39.19 | 40.16 |0.814 |0.811 | 0.98 | ##### Training accuracy: NVIDIA DGX-1 (8x V100 32GB) |GPUs | XLA |Time to train - FP32 (seconds) | Time to train - mixed precision (seconds) | AUC - FP32 | AUC - mixed precision | Time to train speedup (FP32 to mixed precision) | |-----|----------|-------|--------|---------|-------------|---------------| |1 |XLA-OFF | 210.70 | 154.54 |0.815 |0.817 | 1.36 | |1 |XLA-ON | 203.61 | 159.80 | 0.816 |0.813 | 1.27 | |8 |XLA-OFF | 48.643 | 44.02 | 0.811 | 0.817 | 1.11| |8 |XLA-ON | 55.26 | 54.33 | 0.814 | 0.817 | 1.02| ##### Training stability test Training stability was tested over 10 runs for each configuration of double precision / AMP, XLA-ON / XLA-OFF on 1 GPU and 8 GPUs for both Volta and Ampere architectures. Each run used the same random seed and default values of training hyperparameters. Training was performed on DGX A100 80GB and DGX-1 V100 32GB setups. AUC metric achieved on test set after training is presented in the following plots. (Plot represents XLA-OFF results, for XLA-ON results, check expandable part below) <p align="center"> <img src="./images/stability_no_xla.png"> <br> Figure 4. 
Training stability plot, distribution of AUC across different configurations with XLA-OFF. </p> <details> <summary><b> Training stability with XLA-ON </b></summary> <p align="center"> <img src="./images/stability_xla.png"> <br> Figure 5. Training stability plot, distribution of AUC across different configurations with XLA-ON. </p> </details> <details> <summary><b> Complete list of training stability results </b></summary> | |GPUs |Precision |XLA |Mean AUC |Std AUC |Min AUC |Max AUC | |----------|-----|----------|----------|----------|----------|----------|----------| |DGX A100 |1 |TF32 |XLA-OFF |0.8195 |0.0083 |0.7981 |0.8307 | |DGX A100 |1 |TF32 |XLA-ON |0.8106 |0.0066 |0.8012 |0.8211 | |DGX A100 |1 |AMP |XLA-OFF |0.8110 |0.0103 |0.7939 |0.8244 | |DGX A100 |1 |AMP |XLA-ON |0.8224 |0.0067 |0.8115 |0.8397 | |DGX A100 |8 |TF32 |XLA-OFF |0.8127 |0.0070 |0.8027 |0.8285 | |DGX A100 |8 |TF32 |XLA-ON |0.8143 |0.0079 |0.8012 |0.8251 | |DGX A100 |8 |AMP |XLA-OFF |0.8084 |0.0121 |0.7850 |0.8203 | |DGX A100 |8 |AMP |XLA-ON |0.8109 |0.0077 |0.8018 |0.8281 | |DGX-1 V100|1 |FP32 |XLA-OFF |0.8152 |0.0075 |0.8006 |0.8255 | |DGX-1 V100|1 |FP32 |XLA-ON |0.8158 |0.0055 |0.8060 |0.8261 | |DGX-1 V100|1 |AMP |XLA-OFF |0.8172 |0.0045 |0.8097 |0.8237 | |DGX-1 V100|1 |AMP |XLA-ON |0.8133 |0.0070 |0.7987 |0.8234 | |DGX-1 V100|8 |FP32 |XLA-OFF |0.8112 |0.0055 |0.8027 |0.8182 | |DGX-1 V100|8 |FP32 |XLA-ON |0.8144 |0.0087 |0.8037 |0.8281 | |DGX-1 V100|8 |AMP |XLA-OFF |0.8173 |0.0061 |0.8080 |0.8277 | |DGX-1 V100|8 |AMP |XLA-ON |0.8169 |0.0109 |0.7952 |0.8326 | </details> &nbsp; For both NVIDIA Ampere and NVIDIA Volta, even though the same seed was used for each run, there is a still noticeable variance. The reason for that are built-in non-deterministic GPU kernels in [tf.math.unsorted_segment_sum](https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_sum) operation. However, since it is six times faster than the deterministic implementation of this operation, this is the preferable solution. ##### Impact of mixed precision on training accuracy Results in this section present the impact of enabling AMP on the AUC. Models were trained using default parameters, on 1/8 GPUs and on Volta/Ampere architecture. AUC is measured on test set after model training. (Plot represents XLA-OFF results, for XLA-ON results, check expandable part below) <p align="center"> <img src="./images/auc_amp_impact_no_xla.png"> <br> Figure 6. Impact of AMP on test set AUC (XLA-OFF) </p> <details> <summary><b> Impact of AMP on AUC for XLA-ON </b></summary> <p align="center"> <img src="./images/auc_amp_impact_xla.png"> <br> Figure 7. Impact of AMP on test set AUC (XLA-ON) </p> </details> &nbsp; Distribution scores for full precision training and AMP training were compared in terms of mean, variance and Kolmogorov–Smirnov test to state statistical difference between full precision and AMP results. Refer to the expandable table below. 
<details> <summary><b> Full tabular data for AMP influence on AUC </b></summary> | |GPUs |XLA |Mean AUC for Full precision (TF32 for A100, FP32 for V100)|Std AUC for Full precision (TF32 for A100, FP32 for V100)|Mean AUC for AMP |Std AUC for AMP |KS test value: statistics, p-value| |------|-----|-----|------------------------------|------------------------------|--------------------|--------------------|--------------------| |DGX A100|1 |XLA-OFF|0.8195 |0.0083 |0.8110 |0.0103 |0.6000, 0.0524 | |DGX A100|1 |XLA-ON|0.8106 |0.0066 |0.8224 |0.0067 |0.7000, 0.0123 | |DGX A100|8 |XLA-OFF|0.8127 |0.0070 |0.8084 |0.0121 |0.2000, 0.9945 | |DGX A100|8 |XLA-ON|0.8143 |0.0079 |0.8109 |0.0077 |0.4000, 0.4175 | |DGX-1 V100|1 |XLA-OFF|0.8152 |0.0075 |0.8172 |0.0045 |0.2000, 0.9945 | |DGX-1 V100|1 |XLA-ON|0.8158 |0.0055 |0.8133 |0.0070 |0.2000, 0.9945 | |DGX-1 V100|8 |XLA-OFF|0.8112 |0.0055 |0.8173 |0.0061 |0.4000, 0.4175 | |DGX-1 V100|8 |XLA-ON|0.8144 |0.0087 |0.8169 |0.0109 |0.4000, 0.4175 | </details> &nbsp; #### Training accuracy plot Models trained with FP32, TF32, and Automatic Mixed Precision (AMP) achieve similar accuracy. Plot represents ROC AUC on the test set for 1 and 8 GPUs, with precision FP32/TF32 (for Volta/Ampere) and AMP. All other training parameters are default. <p align="center"> <img src="./images/sim_roc.png"> <br> Figure 8. ROC curve for different configurations of Ampere/Volta, 1/8 GPUs, double precision / AMP. (XLA-OFF) </p> #### Training performance results Our results were obtained by running the `scripts/run_model.sh` script in the TensorFlow2 21.10-py3 NGC container. Dataset was prebatched with the size of 16384. Numbers were averaged over 10 separate runs for each configuration. For each run, performance numbers (in samples per second) were averaged over training steps from one epoch which gives reliable estimates of the throughput. We also exclude the first 20 steps of training as a warmup phase. The cumulative batch size of all GPUs in performance tests was set to 131072. To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). ##### Training performance: NVIDIA DGX A100 (8x A100 80GB) | GPUs | XLA | Throughput - TF32 (samples/s) | Throughput - mixed precision (samples/s) | Throughput speedup (mixed precision / TF32) | Strong scaling - TF32 | Strong scaling - mixed precision | |-------:|------:|--------------------------------:|-------------------------------------------:|----------------------------------------------:|------------------------:|-----------------------------------:| | 1 | OFF | 377254.65 | 479921.54 | 1.27 | 1.00 | 1.00 | | 1 | ON | 455724.01 | 565221.04 | 1.24 | 1.00 | 1.00 | | 8 | OFF | 2161681.55 | 2603489.60 | 1.20 | 5.73 | 5.42 | | 8 | ON | 2662368.18 | 2979441.80 | 1.12 | 5.84 | 5.27 | <details> <summary><b> NVIDIA DGX A100 XLA-ON / XLA-OFF training speedup </b></summary> For each configuration of parameters present in the table, the `Speedup` column shows the speedup achieved by turning on XLA. 
|GPUs |Precision |Speedup | |-----|---------------|--------| |1 |TF32 |1.208 | |1 |AMP |1.178 | |8 |TF32 |1.232 | |8 |AMP |1.119 | </details> &nbsp; ##### Training performance: NVIDIA DGX-2 (16x V100 32GB) | GPUs | XLA | Throughput - FP32 (samples/s) | Throughput - mixed precision (samples/s) | Throughput speedup (mixed precision / FP32) | Strong scaling - FP32 | Strong scaling - mixed precision | |-------:|------:|--------------------------------:|-------------------------------------------:|----------------------------------------------:|------------------------:|-----------------------------------:| | 1 | OFF | 209376.38 | 309752.48 | 1.48 | 1.00 | 1.00 | | 1 | ON | 245414.62 | 348945.59 | 1.42 | 1.00 | 1.00 | | 8 | OFF | 1310239.01 | 1689602.79 | 1.29 | 6.26 | 5.45 | | 8 | ON | 1483120.32 | 1962226.32 | 1.32 | 6.04 | 5.62 | | 16 | OFF | 2127221.65 | 2555926.79 | 1.20 | 10.16 | 8.25 | | 16 | ON | 2450499.40 | 2788997.07 | 1.14 | 9.99 | 7.99 | <details> <summary><b> NVIDIA DGX-2 XLA-ON / XLA-OFF training speedup </b></summary> For each configuration of parameters present in the table, the `Speedup` column shows the speedup achieved by turning on XLA. |GPUs |AMP |Speedup | |-----|--------------------|---------------| |1 |FP32 |1.172 | |1 |AMP |1.127 | |8 |FP32 |1.132 | |8 |AMP |1.161 | |16 |FP32 |1.152 | |16 |AMP |1.091 | </details> &nbsp; <details> <summary><b> NVIDIA DGX A100 / DGX-2 (Ampere / Volta) training speedup </b></summary> | GPUs | XLA | Precision | Speedup | |-------:|------:|:------------|----------:| | 1 | OFF | TF32/FP32 | 1.802 | | 1 | OFF | AMP | 1.549 | | 1 | ON | TF32/FP32 | 1.857 | | 1 | ON | AMP | 1.620 | | 8 | OFF | TF32/FP32 | 1.650 | | 8 | OFF | AMP | 1.541 | | 8 | ON | TF32/FP32 | 1.795 | | 8 | ON | AMP | 1.518 | </details> &nbsp; #### Inference performance results Our results were obtained by running the `scripts/run_model.sh` script in the TensorFlow2 21.10-py3 NGC container. Numbers were averaged over 10 separate runs for each configuration. For each run, performance numbers (in samples per second) were averaged over training steps from one epoch which gives reliable estimates of the throughput. We also exclude the first 20 steps of training as a warmup phase. To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). 
##### Inference performance: NVIDIA DGX A100 (8x A100 80GB) | Batch Size | XLA | Throughput - TF32 (samples/s) | Throughput - mixed precision (samples/s) | Throughput speedup (mixed precision / TF32) | |--------------------:|------:|--------------------------------:|-------------------------------------------:|----------------------------------------------:| | 4096 | ON | 618547.45 | 669640.65 | 1.08 | | 8192 | ON | 722801.14 | 849101.88 | 1.17 | | 16384 | ON | 859418.77 | 1051361.67 | 1.22 | | 32768 | ON | 976771.70 | 1269000.97 | 1.30 | | 65536 | ON | 1082688.51 | 1444729.52 | 1.33 | | 131072 | ON | 1094733.64 | 1483542.86 | 1.36 | <details> <summary><b> Complete table of DGX A100 inference performance results </b></summary> | Batch Size | XLA | Precision | Throughput (samples/s) | |-------------:|:------|:------------|:--------------------------| | 4096 | OFF | TF32 | 708349.73 ± 14161.58 | | 8192 | OFF | TF32 | 873335.82 ± 8539.56 | | 16384 | OFF | TF32 | 937987.79 ± 12114.34 | | 32768 | OFF | TF32 | 943313.07 ± 8631.81 | | 65536 | OFF | TF32 | 960794.46 ± 7388.45 | | 131072 | OFF | TF32 | 966245.27 ± 8637.82 | | 4096 | OFF | AMP | 645394.94 ± 14844.27 | | 8192 | OFF | AMP | 919410.07 ± 11355.28 | | 16384 | OFF | AMP | 1136346.66 ± 14529.91 | | 32768 | OFF | AMP | 1216810.45 ± 21013.12 | | 65536 | OFF | AMP | 1287305.05 ± 19373.18 | | 131072 | OFF | AMP | 1298478.97 ± 10733.67 | | 4096 | ON | TF32 | 618547.45 ± 6569.97 | | 8192 | ON | TF32 | 722801.14 ± 9448.19 | | 16384 | ON | TF32 | 859418.77 ± 10012.61 | | 32768 | ON | TF32 | 976771.70 ± 13377.36 | | 65536 | ON | TF32 | 1082688.51 ± 8523.55 | | 131072 | ON | TF32 | 1094733.64 ± 11157.18 | | 4096 | ON | AMP | 669640.65 ± 9319.68 | | 8192 | ON | AMP | 849101.88 ± 14068.04 | | 16384 | ON | AMP | 1051361.67 ± 15310.42 | | 32768 | ON | AMP | 1269000.97 ± 23971.56 | | 65536 | ON | AMP | 1444729.52 ± 18011.54 | | 131072 | ON | AMP | 1483542.86 ± 6751.29 | </details> <details> <summary><b> DGX A100 XLA-ON / XLA-OFF inference Speedup </b></summary> For each configuration of parameters present in the table, the `Speedup` column shows the speedup achieved by turning on XLA. 
|Batch Size |Precision |Speedup | |--------------------|---------------|--------| |4096 |TF32 |0.873 | |8192 |TF32 |0.828 | |16384 |TF32 |0.916 | |32768 |TF32 |1.035 | |65536 |TF32 |1.127 | |131072 |TF32 |1.133 | |4096 |AMP |1.038 | |8192 |AMP |0.924 | |16384 |AMP |0.925 | |32768 |AMP |1.043 | |65536 |AMP |1.187 | |131072 |AMP |1.143 | </details> &nbsp; ##### Inference performance: NVIDIA DGX-2 (16x V100 32GB) | Batch Size | XLA | Throughput - FP32 (samples/s) | Throughput - mixed precision (samples/s) | Throughput speedup (mixed precision / FP32) | |--------------------:|------:|--------------------------------:|-------------------------------------------:|----------------------------------------------:| | 4096 | ON | 444532.22 | 541975.24 | 1.22 | | 8192 | ON | 505047.64 | 642784.48 | 1.27 | | 16384 | ON | 549325.54 | 727077.63 | 1.32 | | 32768 | ON | 587452.73 | 788606.35 | 1.34 | | 65536 | ON | 605187.67 | 832651.59 | 1.38 | | 131072 | ON | 599557.03 | 840602.90 | 1.40 | <details> <summary><b> Complete table of DGX-2 inference performance results </b></summary> | Batch Size | XLA | Precision | Throughput (samples/s) | |-------------:|:------|:------------|:--------------------------| | 4096 | OFF | FP32 | 459175.30 ± 23184.33 | | 8192 | OFF | FP32 | 499179.20 ± 15967.26 | | 16384 | OFF | FP32 | 525180.72 ± 2521.56 | | 32768 | OFF | FP32 | 532042.10 ± 4020.44 | | 65536 | OFF | FP32 | 534307.20 ± 7276.26 | | 131072 | OFF | FP32 | 532311.44 ± 6195.16 | | 4096 | OFF | AMP | 581771.66 ± 6163.50 | | 8192 | OFF | AMP | 665048.04 ± 4607.95 | | 16384 | OFF | AMP | 716355.19 ± 7174.98 | | 32768 | OFF | AMP | 741642.61 ± 4981.04 | | 65536 | OFF | AMP | 755141.25 ± 6175.05 | | 131072 | OFF | AMP | 744459.46 ± 8183.17 | | 4096 | ON | FP32 | 444532.22 ± 6239.01 | | 8192 | ON | FP32 | 505047.64 ± 6543.06 | | 16384 | ON | FP32 | 549325.54 ± 2841.21 | | 32768 | ON | FP32 | 587452.73 ± 2366.43 | | 65536 | ON | FP32 | 605187.67 ± 3740.07 | | 131072 | ON | FP32 | 599557.03 ± 11811.28 | | 4096 | ON | AMP | 541975.24 ± 4441.93 | | 8192 | ON | AMP | 642784.48 ± 4721.08 | | 16384 | ON | AMP | 727077.63 ± 5332.80 | | 32768 | ON | AMP | 788606.35 ± 11705.36 | | 65536 | ON | AMP | 832651.59 ± 10401.17 | | 131072 | ON | AMP | 840602.90 ± 16358.73 | </details> <details> <summary><b> DGX-2 XLA-ON / XLA-OFF inference speedup </b></summary> For each configuration of parameters present in the table, the `Speedup` column shows the speedup achieved by turning on XLA. 
|Batch Size |Precision |Speedup |
|--------------------|---------------|--------|
|4096   |FP32 |0.968 |
|8192   |FP32 |1.012 |
|16384  |FP32 |1.046 |
|32768  |FP32 |1.104 |
|65536  |FP32 |1.133 |
|131072 |FP32 |1.126 |
|4096   |AMP  |0.932 |
|8192   |AMP  |0.967 |
|16384  |AMP  |1.015 |
|32768  |AMP  |1.063 |
|65536  |AMP  |1.103 |
|131072 |AMP  |1.129 |

</details>

&nbsp;

<details>
<summary><b> NVIDIA A100 / DGX-2 (Ampere / Volta) inference speedup </b></summary>

| Batch Size | XLA | Precision | Speedup |
|-------------:|:------|:------------|----------:|
| 4096 | OFF | TF32/FP32 | 1.54 |
| 8192 | OFF | TF32/FP32 | 1.75 |
| 16384 | OFF | TF32/FP32 | 1.79 |
| 32768 | OFF | TF32/FP32 | 1.77 |
| 65536 | OFF | TF32/FP32 | 1.80 |
| 131072 | OFF | TF32/FP32 | 1.81 |
| 4096 | OFF | AMP | 1.11 |
| 8192 | OFF | AMP | 1.38 |
| 16384 | OFF | AMP | 1.59 |
| 32768 | OFF | AMP | 1.64 |
| 65536 | OFF | AMP | 1.71 |
| 131072 | OFF | AMP | 1.74 |
| 4096 | ON | TF32/FP32 | 1.39 |
| 8192 | ON | TF32/FP32 | 1.43 |
| 16384 | ON | TF32/FP32 | 1.56 |
| 32768 | ON | TF32/FP32 | 1.66 |
| 65536 | ON | TF32/FP32 | 1.79 |
| 131072 | ON | TF32/FP32 | 1.83 |
| 4096 | ON | AMP | 1.24 |
| 8192 | ON | AMP | 1.32 |
| 16384 | ON | AMP | 1.45 |
| 32768 | ON | AMP | 1.61 |
| 65536 | ON | AMP | 1.74 |
| 131072 | ON | AMP | 1.76 |

</details>

&nbsp;

## Release notes

### Changelog

May 2022
- Initial release

November 2022
- Moved batching and padding operations to preprocessing
- Added support for prebatched samples during dataloading
- Reduced throughput variance (previously appearing mainly during inference)

### Known issues

- The SIM model results are non-deterministic, even when using the same random seed. The reason for this non-determinism is the [tf.math.unsorted_segment_sum](https://www.tensorflow.org/api_docs/python/tf/math/unsorted_segment_sum) operation called within an optimization step. Its influence depends on the categorical data distribution within a batch, and the issue is more severe for momentum-based optimizers. A potential solution is to use a deterministic version of this op, which allows perfect reproduction but makes training up to six times slower.
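For experiments where exact reproducibility matters more than speed, one possible workaround, shown here only as a sketch and not wired into this repository's scripts, is to request deterministic TensorFlow kernels before the model is built; `tf.config.experimental.enable_op_determinism` is available in TensorFlow 2.9 and later:

```python
import tensorflow as tf

# Sketch only (assumes TensorFlow >= 2.9): deterministic kernels replace the
# non-deterministic GPU implementation of ops such as tf.math.unsorted_segment_sum,
# at the cost of significantly slower training.
tf.keras.utils.set_random_seed(42)              # seeds the Python, NumPy, and TensorFlow RNGs
tf.config.experimental.enable_op_determinism()  # opt in to deterministic op implementations
```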
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2AttentionPlugin
taco2AttentionPlugin
taco2AttentionLayerPlugin
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_ATTENTIONLAYER_H #define TT2I_ATTENTIONLAYER_H #include "NvInfer.h" #include <memory> #include <string> #include <vector> namespace nvinfer1 { namespace plugin { class Taco2AttentionLayerKernel; class Taco2AttentionLayerPlugin : public nvinfer1::IPluginV2DynamicExt { public: using value_type = float; enum Inputs { MEMORY_INDEX = 0, PROCESSED_MEMORY_INDEX = 1, WEIGHT_INDEX = 2, ATTENTION_HIDDEN_INDEX = 3, NUM_INPUTS = 4 }; enum Outputs { CONTEXT_OUTPUT = 0, WEIGHT_OUTPUT = 1, NUM_OUTPUTS = 2 }; /** * @brief Get the name of this plugin. * * @return The name. */ static const char* getName(); /** * @brief Get the version of this plugin. * * @return The version. */ static const char* getVersion(); /** * @brief Create a new Taco2AttentionLayerPlugin from serialized data. * * @param data The data. * @param length The length of the data in bytes. * * @return The instantiated plugin. */ static Taco2AttentionLayerPlugin deserialize(const void* data, size_t length); /** * @brief Create a new Taco2AttentionLayerPlugin. * * @param inputLength The length of the input. * @param encDimension The number of encoding dimensions. * @param queryDimension The number of query dimensions. * @param numFilters The number of convolution filters. * @param convKernelSize The convolution kernel size. * @param attDimension The attention dimension. * @param queryWeights The query questions. * @param convWeights The convolution weights. * @param locationWeights The location weights. * @param energyWeights The energy weights. */ Taco2AttentionLayerPlugin(int encDimension, int queryDimension, int numFilters, int convKernelSize, int attDimension, const nvinfer1::Weights& queryWeights, const nvinfer1::Weights& convWeights, const nvinfer1::Weights& locationWeights, const nvinfer1::Weights& energyWeights); /** * @brief Move constructor. * * @param other The Taco2AttentionLayer to move. */ Taco2AttentionLayerPlugin(Taco2AttentionLayerPlugin&& other); /** * @brief Move assignment operator. 
* * @param other The Taco2AttentionLayerPlugin to move. * * @return This Taco2AttentionLayerPlugin. */ Taco2AttentionLayerPlugin& operator=(Taco2AttentionLayerPlugin&& other); /** * @brief Destructor. */ ~Taco2AttentionLayerPlugin(); // disable copying Taco2AttentionLayerPlugin(const Taco2AttentionLayerPlugin& other) = delete; Taco2AttentionLayerPlugin& operator=(const Taco2AttentionLayerPlugin& other) = delete; /** * @brief Return the data type of the plugin output at the requested index. * * @param index The output index. * @param inputTypes The input data types. * @param nbInputs The number of inputs. * * @return The type of output. */ nvinfer1::DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const override; /** * @brief Get the plugin type. * * @return The plugin type. */ const char* getPluginType() const override; /** * @brief Get the plugin version. * * @return The plugin version. */ const char* getPluginVersion() const override; /** * @brief Get the number of outputs. * * @return The number of outputs. */ int getNbOutputs() const override; /** * @brief Get the dimensions of an output tensor. * * @param outputIndex The index of the output tensor. * @param inputs Expressions for dimensions of the input tensors. * @param nbInputs The number of input tensors. * @param expBuilder Object for generating new expressions. * * @return The resulting dimensions. */ nvinfer1::DimsExprs getOutputDimensions( int outputIndex, const DimsExprs* inputs, int nbInputs, IExprBuilder& expBuilder) override; /** * @brief Check if the given plugin format is supported. * * @param pos The format position/index in inOut.format[]. * @param inOut The input and output formats. * @param nbInputs The number of inputs. * @param nbOutputs The number of outputs. * * @return True if it is supported. */ bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) override; /** * @brief Configure this plugin with the given inputs, outputs, and datat * types. * * @param in The input tensor attributes that used for configuration. * @param nbInputs The number of inputs. * @param out The output tensor attributes that are used for configuration. * @param nbOutputs The number of outputs. */ void configurePlugin( const DynamicPluginTensorDesc* in, int nbInputs, const DynamicPluginTensorDesc* out, int nbOutputs) override; /** * @brief Initialize the plugin. * * @return 0 if initialization was successful. Non-zero otherwise. */ int initialize() override; /** * @brief Terminate the plugin (deinitialize). */ void terminate() override; /** * @brief Get workspace size required by this plugin for up to the given * batch size. * * @param in The input tensor descriptors. * @param nbInputs The number of inputs. * @param out The output tensor descriptors. * @param nbOutputs The number of outputs. * * @return The workspace size in bytes. */ size_t getWorkspaceSize( const PluginTensorDesc* in, int nbInputs, const PluginTensorDesc* out, int nbOutputs) const override; /** * @brief Set this plugin for execution on the stream. * * @param inputDesc The input tensor descriptors. * @param outputDesc The output tensor descriptors. * @param inputs The input tensors. * @param outputs The output tensors. * @param workspace The allocated workspace. * @param stream The stream to operate on. * * @return 0 if successfully queued, non-zero otherwise. 
*/ int enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream); /** * @brief Get the number of bytes occupied by this plugin if serialized. * * @return The size in bytes. */ size_t getSerializationSize() const override; /** * @brief Serialize this plugin. * * @param buffer The buffer to write to. */ void serialize(void* buffer) const override; /** * @brief Destroy this plugin instance. */ void destroy() override; /** * @brief Clone this pulgin instance. * * @return The cloned plugin. */ IPluginV2DynamicExt* clone() const override; /** * @brief Set the namespace of this plugin. * * @param pluginNamespace The namespace. */ void setPluginNamespace(const char* pluginNamespace) override; /** * @brief Get the namespace of this plugin. * * @return The namespace. */ const char* getPluginNamespace() const override; private: int mNumEncodingDimension; int mNumQueryDimension; int mNumFilters; int mConvKernelSize; int mNumAttentionDimension; std::vector<value_type> mQueryWeightsHost; std::vector<value_type> mConvWeightsHost; std::vector<value_type> mLocationWeightsHost; std::vector<value_type> mEnergyWeightsHost; std::unique_ptr<Taco2AttentionLayerKernel> mKernel; std::string mNamespace; }; } // namespace plugin } // namespace nvinfer1 #endif
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts
scripts
build_benchmark_engines
#!/bin/bash ## # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # MODEL_DIR="/models/" ENGINE_DIR="/engines/" TACOTRON2_ID="1c5ZTuT7J08wLUoVZ2KkUs_VdZuJ86ZqA" WAVEGLOW_ID="1WsibBTsuRg_SF2Z6L6NFRTT-NjEy1oTx" TACOTRON2_PT="${MODEL_DIR}/tacotron2.pt" WAVEGLOW_PT="${MODEL_DIR}/waveglow.pt" TACOTRON2_JSON="${MODEL_DIR}/tacotron2.json" WAVEGLOW_ONNX="${MODEL_DIR}/waveglow.onnx" DENOISER_JSON="${MODEL_DIR}/denoiser.json" HELPER_DIR="src/trt/helpers" BIN_DIR="./build/bin" BENCHMARK_BIN="${BIN_DIR}/benchmark" MAX_BATCH_SIZE=32 SCRIPT_DIR="$(dirname "${0}")" ENGINE_BUILD_SCRIPT="${SCRIPT_DIR}/build_engines.sh" die() { echo "ERROR: ${@}" 1>&2 exit 1 } download_gfile() { which curl &> /dev/null || die "Failed to find 'curl'." # download file from google drive local GOID="${1}" local filename="${2}" local GURL='https://drive.google.com/uc?export=download' local cookie="$(mktemp)" curl -sc "${cookie}" "${GURL}&id=${GOID}" local getcode="$(awk '/_warning_/ {print $NF}' "${cookie}")" curl -Lb "${cookie}" "${GURL}&confirm=${getcode}&id=${GOID}" -o "${filename}" rm "${cookie}" } mkdir -p "${ENGINE_DIR}" "${MODEL_DIR}" apt-get update -qy apt-get install -y libsndfile1 || die "Failed to install libsndfile" apt-get clean git clone --depth=1 https://github.com/NVIDIA/DeepLearningExamples TACO2_DIR="./DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/" # install required packages pip3 install "torch==1.3" onnx scipy librosa || die "Failed while installing python packages." # test packages python3 -c "import torch; import onnx; import scipy; import numpy; import librosa" || die "Python packages fail to import" ## build tacotron2 engine # download model download_gfile "${TACOTRON2_ID}" "${TACOTRON2_PT}" || die "Failed to get tacotron2.pt" # convert model to importable format ${HELPER_DIR}/tacotron2_to_json.py "${TACOTRON2_PT}" "${TACOTRON2_JSON}" || die "Failed to export tacotron2 to json." 
rm -v "${TACOTRON2_PT}" ## build wave glow engine # download model download_gfile "${WAVEGLOW_ID}" "${WAVEGLOW_PT}" || die "Failed to get waveglow.pt" # convert model to importable format ${HELPER_DIR}/waveglow_to_onnx.py \ -w "${WAVEGLOW_PT}" \ -W "${TACO2_DIR}" \ -o "${WAVEGLOW_ONNX}" \ --length_mels=160 || die "Failed to export waveglow to onnx." ## build denoiser engine ${HELPER_DIR}/denoiser_to_json.py "${TACO2_DIR}" "${WAVEGLOW_PT}" "${DENOISER_JSON}" || die "Failed to export denoiser to json." # wait to remove wave glow until after denoiser is finished rm -v "${WAVEGLOW_PT}" rm -rvf "./DeepLearningExamples" pip3 uninstall -qy torch onnx scipy apt-get purge -y libsndfile1 "${ENGINE_BUILD_SCRIPT}" || die "Failed to build engines"
TensorFlow2/Recommendation/WideAndDeep/triton
triton
README
# Deploying the Wide & Deep model on Triton Inference Server This folder contains instructions for deployment to run inference on Triton Inference Server as well as a detailed performance analysis. The purpose of this document is to help you with achieving the best inference performance. ## Table of contents - [Solution overview](#solution-overview) - [Introduction](#introduction) - [Deployment process](#deployment-process) - [Setup](#setup) - [Quick Start Guide](#quick-start-guide) - [Performance](#performance) - [Offline scenario](#offline-scenario) - [Offline: NVIDIA A30, TensorFlow with FP32](#offline-nvidia-a30-tensorflow-with-fp32) - [Offline: NVIDIA A30, NVIDIA TensorRT with FP16](#offline-nvidia-a30-nvidia-tensorrt-with-fp16) - [Offline: NVIDIA DGX-1 (1x V100 32GB), TensorFlow with FP32](#offline-nvidia-dgx-1-1x-v100-32gb-tensorflow-with-fp32) - [Offline: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-nvidia-tensorrt-with-fp16) - [Offline: NVIDIA DGX A100 (1x A100 80GB), TensorFlow with FP32](#offline-nvidia-dgx-a100-1x-a100-80gb-tensorflow-with-fp32) - [Offline: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-nvidia-tensorrt-with-fp16) - [Offline: NVIDIA T4, TensorFlow with FP32](#offline-nvidia-t4-tensorflow-with-fp32) - [Offline: NVIDIA T4, NVIDIA TensorRT with FP16](#offline-nvidia-t4-nvidia-tensorrt-with-fp16) - [Online scenario](#online-scenario) - [Online: NVIDIA A30, TensorFlow with FP32](#online-nvidia-a30-tensorflow-with-fp32) - [Online: NVIDIA A30, NVIDIA TensorRT with FP16](#online-nvidia-a30-nvidia-tensorrt-with-fp16) - [Online: NVIDIA DGX-1 (1x V100 32GB), TensorFlow with FP32](#online-nvidia-dgx-1-1x-v100-32gb-tensorflow-with-fp32) - [Online: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16](#online-nvidia-dgx-1-1x-v100-32gb-nvidia-tensorrt-with-fp16) - [Online: NVIDIA DGX A100 (1x A100 80GB), TensorFlow with FP32](#online-nvidia-dgx-a100-1x-a100-80gb-tensorflow-with-fp32) - [Online: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16](#online-nvidia-dgx-a100-1x-a100-80gb-nvidia-tensorrt-with-fp16) - [Online: NVIDIA T4, TensorFlow with FP32](#online-nvidia-t4-tensorflow-with-fp32) - [Online: NVIDIA T4, NVIDIA TensorRT with FP16](#online-nvidia-t4-nvidia-tensorrt-with-fp16) - [Advanced](#advanced) - [Step by step deployment process](#step-by-step-deployment-process) - [Latency explanation](#latency-explanation) - [Release notes](#release-notes) - [Changelog](#changelog) - [Known issues](#known-issues) ## Solution overview ### Introduction The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server) provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or gRPC endpoint, allowing remote clients to request inferencing for any number of GPU or CPU models being managed by the server. This README provides step-by-step deployment instructions for models generated during training (as described in the [model README](../README.md)). Additionally, this README provides the corresponding deployment scripts that ensure optimal GPU utilization during inferencing on Triton Inference Server. ### Deployment process The deployment process consists of two steps: 1. Conversion. The purpose of conversion is to find the best performing model format supported by Triton Inference Server. 
Triton Inference Server uses a number of runtime backends such as [TensorRT](https://developer.nvidia.com/tensorrt), [LibTorch](https://github.com/triton-inference-server/pytorch_backend) and [ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend) to support various model types. Refer to the [Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton) for a list of available backends. 2. Configuration. Model configuration on Triton Inference Server, which generates necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md). After deployment, Triton inference server is used for evaluation of converted model in two steps: 1. Correctness tests. Produce results which are tested against given correctness thresholds. 2. Performance tests. Produce latency and throughput results for offline (static batching) and online (dynamic batching) scenarios. All steps are executed by provided runner script. Refer to [Quick Start Guide](#quick-start-guide) ## Setup Ensure you have the following components: * [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) * [NVIDIA TensorFlow NGC container 22.02](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tensorflow) * [NVIDIA Triton Inference Server NGC container 22.02](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver) * [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html) * [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU ## Quick Start Guide Running the following scripts will build and launch the container with all required dependencies for native TensorFlow2 as well as Triton Inference Server. This is necessary for running inference and can also be used for data download, processing, and training of the model. 1. Clone the repository. ``` git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/TensorFlow2/Recommendation/WideAndDeep ``` 2. Prepare the dataset. Assuming that the outbrain dataset is already generated inside `${HOST_OUTBRAIN_PATH}/data`. (using `scripts/preproc.sh`, see [model README](../README.md#quick-start-guide)) ``` mkdir -p ./datasets/outbrain cp -R ${HOST_OUTBRAIN_PATH}/data/valid ./datasets/outbrain ``` 3. Build and run a container that extends NGC TensorFlow2 with the Triton client libraries and necessary dependencies. ``` ./triton/scripts/docker/build.sh ./triton/scripts/docker/interactive.sh ``` 4. Execute runner script (please mind, the run scripts are prepared per NVIDIA GPU). ``` NVIDIA A30: ./triton/runner/start_NVIDIA-A30.sh NVIDIA DGX-1 (1x V100 32GB): ./triton/runner/start_NVIDIA-DGX-1-\(1x-V100-32GB\).sh NVIDIA DGX A100 (1x A100 80GB): ./triton/runner/start_NVIDIA-DGX-A100-\(1x-A100-80GB\).sh NVIDIA T4: ./triton/runner/start_NVIDIA-T4.sh ``` ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Offline scenario The offline scenario assumes the client and server are located on the same host. 
The tests uses: - tensors are passed through shared memory between client and server, the Perf Analyzer flag `shared-memory=system` is used - single request is send from client to server with static size of batch #### Offline: NVIDIA A30, TensorFlow with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA A30 | | Backend |TensorFlow | | Backend accelerator |Automatic FP16| | Precision |FP32 | | Model format |TensorFlow SavedModel | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_a30_experiment_6_triton_performance_offline_6/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_a30_experiment_6_triton_performance_offline_6/plots/latency_vs_batch.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 517.00 | 0.02 | 0.24 | 0.02 | 0.05 | 1.59 | 0.00 | 0.00 | 1.94 | 2.06 | 2.10 | 2.17 | 1.93 | | 16384 | 1 | 2654210.00 | 0.03 | 0.29 | 0.04 | 0.35 | 5.44 | 0.01 | 0.00 | 6.16 | 6.42 | 6.45 | 6.56 | 6.17 | | 32768 | 1 | 2916350.00 | 0.04 | 0.39 | 0.05 | 0.95 | 9.73 | 0.01 | 0.00 | 11.00 | 11.63 | 12.11 | 14.03 | 11.18 | | 49152 | 1 | 2973700.00 | 0.03 | 0.40 | 0.07 | 1.86 | 14.02 | 0.02 | 0.00 | 16.05 | 18.00 | 19.22 | 19.92 | 16.40 | | 65536 | 1 | 3058350.00 | 0.05 | 0.54 | 0.07 | 2.43 | 18.16 | 0.03 | 0.00 | 21.15 | 22.10 | 22.49 | 26.05 | 21.28 | | 81920 | 1 | 3139220.00 | 0.06 | 0.54 | 0.07 | 2.85 | 22.37 | 0.05 | 0.00 | 25.67 | 27.64 | 28.84 | 31.78 | 25.94 | | 98304 | 1 | 3244030.00 | 0.05 | 0.48 | 0.07 | 3.29 | 26.28 | 0.06 | 0.00 | 29.93 | 32.33 | 33.39 | 37.83 | 30.22 | | 114688 | 1 | 3297280.00 | 0.04 | 0.38 | 0.07 | 3.73 | 30.39 | 0.06 | 0.00 | 34.49 | 35.92 | 38.31 | 40.42 | 34.68 | | 131072 | 1 | 3308740.00 | 0.04 | 0.42 | 0.08 | 4.27 | 34.47 | 0.08 | 0.00 | 39.15 | 41.44 | 42.82 | 45.15 | 39.35 | </details> #### Offline: NVIDIA A30, NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA A30 | | Backend |NVIDIA TensorRT | | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA TensorRT | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | NVIDIA TensorRT Capture CUDA Graph | Enabled | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_a30_experiment_10_triton_performance_offline_10/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_a30_experiment_10_triton_performance_offline_10/plots/latency_vs_batch.png"></td> </tr> </tbody> </table> <details> 
<summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 1455.00 | 0.02 | 0.19 | 0.02 | 0.22 | 0.23 | 0.01 | 0.00 | 0.69 | 0.70 | 0.71 | 0.73 | 0.68 | | 16384 | 1 | 4849660.00 | 0.05 | 0.33 | 0.02 | 0.51 | 2.43 | 0.03 | 0.00 | 3.41 | 3.53 | 3.58 | 3.61 | 3.37 | | 32768 | 1 | 6193150.00 | 0.03 | 0.27 | 0.02 | 0.68 | 4.25 | 0.02 | 0.00 | 5.30 | 5.42 | 5.44 | 5.46 | 5.27 | | 49152 | 1 | 5210110.00 | 0.03 | 0.44 | 0.03 | 0.82 | 8.07 | 0.02 | 0.00 | 9.47 | 9.69 | 9.73 | 9.77 | 9.43 | | 65536 | 1 | 6750210.00 | 0.06 | 0.52 | 0.06 | 0.96 | 8.05 | 0.03 | 0.00 | 9.70 | 9.91 | 9.95 | 10.00 | 9.68 | | 81920 | 1 | 4505600.00 | 0.06 | 0.51 | 0.06 | 1.03 | 16.38 | 0.04 | 0.00 | 18.07 | 18.39 | 18.51 | 18.82 | 18.07 | | 98304 | 1 | 5357570.00 | 0.06 | 0.52 | 0.06 | 1.20 | 16.35 | 0.04 | 0.00 | 18.24 | 18.51 | 18.59 | 18.74 | 18.23 | | 114688 | 1 | 6193150.00 | 0.06 | 0.54 | 0.07 | 1.47 | 16.32 | 0.05 | 0.00 | 18.52 | 18.81 | 18.86 | 19.08 | 18.51 | | 131072 | 1 | 7077890.00 | 0.06 | 0.54 | 0.07 | 1.65 | 15.98 | 0.06 | 0.00 | 18.36 | 18.66 | 18.72 | 18.94 | 18.36 | </details> #### Offline: NVIDIA DGX-1 (1x V100 32GB), TensorFlow with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX-1 (1x V100 32GB) | | Backend |TensorFlow | | Backend accelerator |Automatic FP16| | Precision |FP32 | | Model format |TensorFlow SavedModel | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_6_triton_performance_offline_6/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_6_triton_performance_offline_6/plots/latency_vs_batch.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 294.70 | 0.05 | 0.42 | 0.08 | 0.06 | 2.76 | 0.00 | 0.00 | 3.34 | 3.66 | 3.76 | 4.07 | 3.38 | | 16384 | 1 | 2146300.00 | 0.07 | 0.45 | 0.11 | 0.34 | 6.63 | 0.01 | 0.00 | 7.57 | 7.84 | 7.93 | 8.21 | 7.60 | | 32768 | 1 | 2669260.00 | 0.06 | 0.48 | 0.11 | 0.73 | 10.85 | 0.02 | 0.00 | 12.19 | 
12.76 | 12.99 | 13.33 | 12.25 | | 49152 | 1 | 2947650.00 | 0.06 | 0.46 | 0.11 | 1.09 | 14.87 | 0.02 | 0.00 | 16.57 | 17.34 | 17.51 | 17.94 | 16.60 | | 65536 | 1 | 3145730.00 | 0.05 | 0.43 | 0.07 | 1.45 | 18.66 | 0.03 | 0.00 | 20.60 | 21.49 | 21.70 | 22.36 | 20.70 | | 81920 | 1 | 3222190.00 | 0.06 | 0.49 | 0.11 | 1.91 | 22.64 | 0.03 | 0.00 | 25.24 | 26.01 | 26.17 | 27.37 | 25.25 | | 98304 | 1 | 3309570.00 | 0.06 | 0.46 | 0.11 | 2.18 | 26.57 | 0.05 | 0.00 | 29.38 | 30.30 | 30.45 | 31.26 | 29.43 | | 114688 | 1 | 3354620.00 | 0.05 | 0.44 | 0.11 | 2.89 | 30.49 | 0.06 | 0.00 | 33.92 | 34.80 | 35.03 | 36.68 | 34.05 | | 131072 | 1 | 3309570.00 | 0.07 | 0.52 | 0.12 | 3.68 | 34.82 | 0.07 | 0.00 | 39.21 | 40.06 | 40.17 | 40.56 | 39.28 | </details> #### Offline: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX-1 (1x V100 32GB) | | Backend |NVIDIA TensorRT | | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA TensorRT | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | NVIDIA TensorRT Capture CUDA Graph | Enabled | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_10_triton_performance_offline_10/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_10_triton_performance_offline_10/plots/latency_vs_batch.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 749.25 | 0.07 | 0.41 | 0.06 | 0.35 | 0.41 | 0.03 | 0.00 | 1.32 | 1.41 | 1.44 | 1.53 | 1.33 | | 16384 | 1 | 3768320.00 | 0.05 | 0.47 | 0.11 | 0.66 | 2.99 | 0.05 | 0.00 | 4.33 | 4.42 | 4.46 | 4.65 | 4.34 | | 32768 | 1 | 4849660.00 | 0.05 | 0.45 | 0.11 | 0.85 | 5.21 | 0.06 | 0.00 | 6.72 | 6.82 | 6.84 | 6.90 | 6.72 | | 49152 | 1 | 4030460.00 | 0.06 | 0.49 | 0.13 | 1.41 | 9.97 | 0.10 | 0.00 | 12.14 | 12.28 | 12.32 | 12.52 | 12.16 | | 65536 | 1 | 5373950.00 | 0.06 | 0.48 | 0.12 | 1.55 | 9.91 | 0.06 | 0.00 | 12.17 | 12.32 | 12.36 | 12.93 | 12.19 | | 81920 | 1 | 3604480.00 | 0.07 | 0.53 | 0.13 | 2.39 | 19.50 | 0.09 | 0.00 | 22.64 | 22.85 | 22.92 | 24.87 | 22.70 | | 98304 | 1 | 4323940.00 | 0.08 | 0.52 | 0.13 | 2.30 | 19.52 | 0.08 | 0.00 | 22.46 | 23.03 | 23.41 | 26.04 | 22.63 | | 114688 | 1 | 5046270.00 | 0.06 | 0.44 | 0.11 | 2.66 | 19.35 | 0.10 | 0.00 | 22.67 | 22.87 | 23.08 | 23.96 | 22.72 | | 131072 | 1 | 5417640.00 | 0.07 | 0.55 | 0.13 | 4.23 | 19.06 | 0.12 | 0.00 | 24.35 | 24.47 | 24.63 | 25.48 | 24.17 | </details> #### Offline: NVIDIA DGX A100 (1x A100 80GB), TensorFlow with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | 
|:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Backend |TensorFlow | | Backend accelerator |Automatic FP16| | Precision |FP32 | | Model format |TensorFlow SavedModel | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_6_triton_performance_offline_6/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_6_triton_performance_offline_6/plots/latency_vs_batch.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 445.00 | 0.02 | 0.23 | 0.02 | 0.06 | 1.91 | 0.00 | 0.00 | 2.24 | 2.39 | 2.42 | 2.56 | 2.24 | | 16384 | 1 | 3440640.00 | 0.03 | 0.27 | 0.02 | 0.45 | 3.98 | 0.01 | 0.00 | 4.74 | 5.03 | 5.06 | 5.19 | 4.75 | | 32768 | 1 | 4554750.00 | 0.03 | 0.28 | 0.02 | 0.81 | 6.04 | 0.01 | 0.00 | 7.18 | 7.50 | 7.55 | 7.65 | 7.18 | | 49152 | 1 | 5013500.00 | 0.03 | 0.26 | 0.02 | 1.25 | 8.20 | 0.02 | 0.00 | 9.82 | 10.06 | 10.24 | 10.36 | 9.78 | | 65536 | 1 | 5174760.00 | 0.03 | 0.27 | 0.02 | 1.82 | 10.46 | 0.03 | 0.00 | 12.66 | 12.98 | 13.14 | 13.23 | 12.63 | | 81920 | 1 | 5160960.00 | 0.03 | 0.33 | 0.03 | 2.67 | 12.72 | 0.06 | 0.00 | 15.84 | 16.23 | 16.35 | 16.76 | 15.84 | | 98304 | 1 | 5455870.00 | 0.03 | 0.31 | 0.04 | 2.63 | 14.86 | 0.05 | 0.00 | 17.88 | 18.43 | 18.67 | 19.16 | 17.91 | | 114688 | 1 | 5657940.00 | 0.05 | 0.36 | 0.04 | 2.95 | 16.76 | 0.07 | 0.00 | 20.29 | 20.66 | 20.78 | 21.07 | 20.23 | | 131072 | 1 | 5546870.00 | 0.07 | 0.44 | 0.04 | 3.34 | 19.59 | 0.09 | 0.00 | 22.89 | 24.23 | 29.68 | 34.16 | 23.56 | </details> #### Offline: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Backend |NVIDIA TensorRT | | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA TensorRT | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | NVIDIA TensorRT Capture CUDA Graph | Enabled | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_10_triton_performance_offline_10/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_10_triton_performance_offline_10/plots/latency_vs_batch.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | 
Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 1108.00 | 0.02 | 0.26 | 0.02 | 0.34 | 0.25 | 0.02 | 0.00 | 0.89 | 0.91 | 0.97 | 1.35 | 0.90 | | 16384 | 1 | 7192580.00 | 0.02 | 0.27 | 0.02 | 0.52 | 1.41 | 0.03 | 0.00 | 2.24 | 2.31 | 2.37 | 3.36 | 2.27 | | 32768 | 1 | 9043970.00 | 0.02 | 0.34 | 0.03 | 0.72 | 2.46 | 0.05 | 0.00 | 3.57 | 3.67 | 3.75 | 5.35 | 3.62 | | 49152 | 1 | 7962620.00 | 0.02 | 0.28 | 0.03 | 1.17 | 4.57 | 0.05 | 0.00 | 5.97 | 6.14 | 6.28 | 9.31 | 6.13 | | 65536 | 1 | 9764860.00 | 0.02 | 0.28 | 0.03 | 1.77 | 4.51 | 0.06 | 0.00 | 6.59 | 7.01 | 7.24 | 7.59 | 6.68 | | 81920 | 1 | 7045120.00 | 0.02 | 0.28 | 0.03 | 2.49 | 8.66 | 0.07 | 0.00 | 11.45 | 12.10 | 12.34 | 12.60 | 11.56 | | 98304 | 1 | 8110080.00 | 0.02 | 0.28 | 0.03 | 3.02 | 8.65 | 0.08 | 0.00 | 11.97 | 12.66 | 13.00 | 13.19 | 12.08 | | 114688 | 1 | 9175040.00 | 0.02 | 0.29 | 0.03 | 3.40 | 8.64 | 0.09 | 0.00 | 12.43 | 12.69 | 12.77 | 12.89 | 12.48 | | 131072 | 1 | 10354700.00 | 0.02 | 0.27 | 0.03 | 3.84 | 8.37 | 0.10 | 0.00 | 12.57 | 12.77 | 13.02 | 13.16 | 12.63 | </details> #### Offline: NVIDIA T4, TensorFlow with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA T4 | | Backend |TensorFlow | | Backend accelerator |Automatic FP16| | Precision |FP32 | | Model format |TensorFlow SavedModel | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_t4_experiment_6_triton_performance_offline_6/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_t4_experiment_6_triton_performance_offline_6/plots/latency_vs_batch.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 181.00 | 0.09 | 0.78 | 0.13 | 0.19 | 4.32 | 0.02 | 0.00 | 5.52 | 6.18 | 6.30 | 6.59 | 5.52 | | 16384 | 1 | 1023490.00 | 0.12 | 0.96 | 0.17 | 0.86 | 13.82 | 0.04 | 0.00 | 15.95 | 16.92 | 17.16 | 17.49 | 15.98 | | 32768 | 1 | 1201090.00 | 0.12 | 0.96 | 0.18 | 1.50 | 24.31 | 0.06 | 0.00 | 27.14 | 28.06 | 28.18 | 28.40 | 27.12 | | 49152 | 1 | 1265350.00 | 0.12 | 0.96 | 0.18 | 2.30 | 35.08 | 0.07 | 0.00 | 38.60 | 39.79 | 40.11 | 43.47 | 38.70 | | 65536 | 1 | 1288870.00 | 0.12 | 0.94 | 0.18 | 3.13 | 46.14 | 0.11 | 0.00 | 50.54 | 51.51 | 51.68 | 57.69 | 50.63 | | 81920 | 1 | 1310530.00 | 0.12 | 
0.94 | 0.18 | 3.86 | 56.84 | 0.13 | 0.00 | 61.96 | 63.21 | 63.36 | 64.08 | 62.06 | | 98304 | 1 | 1314650.00 | 0.12 | 1.01 | 0.18 | 4.38 | 68.40 | 0.14 | 0.00 | 74.34 | 75.17 | 75.40 | 76.45 | 74.24 | | 114688 | 1 | 1312390.00 | 0.13 | 1.00 | 0.16 | 5.75 | 79.94 | 0.19 | 0.00 | 87.31 | 88.67 | 89.27 | 89.89 | 87.18 | | 131072 | 1 | 1310590.00 | 0.13 | 1.03 | 0.17 | 6.29 | 91.81 | 0.20 | 0.00 | 99.64 | 101.02 | 101.41 | 101.68 | 99.63 | </details> #### Offline: NVIDIA T4, NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA T4 | | Backend |NVIDIA TensorRT | | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA TensorRT | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | NVIDIA TensorRT Capture CUDA Graph | Enabled | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_t4_experiment_10_triton_performance_offline_10/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_t4_experiment_10_triton_performance_offline_10/plots/latency_vs_batch.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 564.00 | 0.05 | 0.61 | 0.15 | 0.56 | 0.37 | 0.02 | 0.00 | 1.77 | 1.88 | 1.91 | 1.95 | 1.77 | | 16384 | 1 | 1916930.00 | 0.11 | 0.89 | 0.18 | 1.19 | 6.08 | 0.06 | 0.00 | 8.55 | 8.75 | 8.79 | 8.91 | 8.51 | | 32768 | 1 | 2129920.00 | 0.12 | 0.92 | 0.18 | 1.84 | 12.18 | 0.07 | 0.00 | 15.32 | 15.56 | 15.66 | 15.82 | 15.32 | | 49152 | 1 | 1703370.00 | 0.12 | 0.94 | 0.18 | 2.51 | 24.94 | 0.08 | 0.00 | 28.76 | 29.70 | 29.74 | 29.94 | 28.78 | | 65536 | 1 | 2228220.00 | 0.12 | 0.97 | 0.18 | 3.22 | 24.59 | 0.11 | 0.00 | 29.08 | 30.25 | 30.35 | 30.47 | 29.20 | | 81920 | 1 | 1447010.00 | 0.12 | 0.99 | 0.18 | 4.04 | 51.04 | 0.13 | 0.00 | 56.53 | 57.58 | 57.85 | 58.43 | 56.51 | | 98304 | 1 | 1720030.00 | 0.13 | 1.00 | 0.18 | 4.96 | 50.51 | 0.15 | 0.00 | 56.84 | 57.84 | 57.93 | 58.35 | 56.92 | | 114688 | 1 | 1987590.00 | 0.13 | 1.04 | 0.19 | 5.89 | 50.14 | 0.18 | 0.00 | 57.58 | 58.78 | 58.81 | 58.91 | 57.56 | | 131072 | 1 | 2271540.00 | 0.12 | 0.98 | 0.19 | 6.93 | 49.07 | 0.16 | 0.00 | 57.34 | 58.56 | 58.79 | 58.89 | 57.45 | </details> ### Online scenario The online scenario assumes the client and server are located on different hosts. 
The tests uses: - tensors are passed through HTTP from client to server - concurrent requests are send from client to server, the final batch is created on server side #### Online: NVIDIA A30, TensorFlow with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA A30 | | Backend |TensorFlow | | Backend accelerator |Automatic FP16| | Precision |FP32 | | Model format |TensorFlow SavedModel | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_a30_experiment_6_triton_performance_online_6/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 2048 | 8 | 2205700.00 | 0.46 | 2.09 | 0.99 | 0.31 | 3.53 | 0.02 | 0.00 | 7.91 | 8.42 | 8.74 | 9.36 | 7.40 | | 2048 | 16 | 2686980.00 | 0.46 | 2.83 | 2.38 | 0.51 | 5.91 | 0.03 | 0.00 | 12.64 | 13.41 | 13.90 | 15.69 | 12.12 | | 2048 | 24 | 2658300.00 | 0.47 | 4.46 | 3.75 | 1.25 | 8.24 | 0.05 | 0.00 | 18.65 | 20.78 | 22.22 | 27.96 | 18.21 | | 2048 | 32 | 2672640.00 | 0.47 | 4.46 | 6.46 | 1.74 | 11.02 | 0.08 | 0.00 | 24.53 | 27.26 | 28.82 | 30.28 | 24.23 | | 2048 | 40 | 3217410.00 | 0.47 | 5.12 | 3.96 | 1.78 | 13.76 | 0.07 | 0.00 | 24.11 | 29.70 | 31.31 | 32.02 | 25.17 | | 2048 | 48 | 3246080.00 | 0.50 | 5.77 | 5.01 | 2.45 | 15.96 | 0.10 | 0.00 | 28.87 | 36.61 | 39.55 | 44.82 | 29.78 | | 2048 | 56 | 3391490.00 | 0.48 | 5.52 | 5.74 | 2.21 | 19.18 | 0.10 | 0.00 | 32.74 | 36.93 | 39.33 | 44.67 | 33.24 | | 2048 | 64 | 3481600.00 | 0.50 | 5.98 | 6.83 | 2.90 | 20.61 | 0.12 | 0.00 | 36.78 | 39.41 | 41.34 | 44.04 | 36.94 | | 2048 | 72 | 3532800.00 | 0.51 | 7.84 | 5.61 | 2.75 | 23.65 | 0.14 | 0.00 | 40.06 | 42.18 | 43.18 | 45.15 | 40.49 | | 2048 | 80 | 3551230.00 | 0.51 | 8.02 | 8.24 | 3.04 | 25.05 | 0.14 | 0.00 | 44.82 | 46.05 | 46.43 | 47.17 | 45.01 | | 2048 | 88 | 3491840.00 | 0.55 | 6.85 | 10.81 | 3.81 | 27.98 | 0.14 | 0.00 | 49.97 | 51.88 | 52.12 | 54.34 | 50.13 | | 2048 | 96 | 3678210.00 | 0.49 | 6.44 | 10.60 | 2.42 | 31.40 | 0.13 | 0.00 | 51.33 | 52.85 | 53.52 | 55.37 | 51.48 | | 2048 | 104 | 3627010.00 | 0.51 | 8.84 | 11.81 | 3.21 | 32.91 | 0.13 | 0.00 | 56.68 | 59.57 | 65.27 | 69.32 | 57.42 | | 2048 | 112 | 3670020.00 | 0.50 | 10.27 | 11.60 | 3.22 | 35.39 | 0.17 | 0.00 | 60.96 | 62.94 | 63.78 | 66.09 | 61.14 | | 2048 | 120 | 3596290.00 | 0.53 | 8.14 | 15.83 | 3.52 | 37.44 | 0.18 | 0.00 | 65.69 | 68.82 | 69.33 | 70.23 | 65.64 | | 2048 | 128 | 3747840.00 | 0.53 | 9.94 | 13.78 | 3.35 | 39.42 | 0.18 | 0.00 | 67.36 | 68.44 | 68.70 | 69.57 | 67.19 | | 2048 | 136 | 3708930.00 | 0.50 | 11.62 | 15.82 | 4.05 | 40.59 | 0.22 | 0.00 | 73.04 | 76.44 | 77.91 | 78.35 | 72.81 | | 2048 | 
144 | 3631100.00 | 0.53 | 13.62 | 17.34 | 4.16 | 42.39 | 0.27 | 0.00 | 78.38 | 81.03 | 81.55 | 82.67 | 78.31 | | 2048 | 152 | 3624960.00 | 0.51 | 16.29 | 16.20 | 4.06 | 45.15 | 0.25 | 0.00 | 82.34 | 87.68 | 95.84 | 107.03 | 82.47 | | 2048 | 160 | 3598340.00 | 0.52 | 12.15 | 19.21 | 4.13 | 49.93 | 0.26 | 0.00 | 88.03 | 91.12 | 92.91 | 94.12 | 86.20 | | 2048 | 168 | 3715450.00 | 0.53 | 15.01 | 17.67 | 4.03 | 50.90 | 0.24 | 0.00 | 89.14 | 92.45 | 93.39 | 95.30 | 88.37 | | 2048 | 176 | 3653630.00 | 0.56 | 10.28 | 23.72 | 4.36 | 52.77 | 0.29 | 0.00 | 93.17 | 94.98 | 95.73 | 96.99 | 91.98 | | 2048 | 184 | 3700740.00 | 0.58 | 15.49 | 20.40 | 4.19 | 55.47 | 0.24 | 0.00 | 96.35 | 101.44 | 102.26 | 103.61 | 96.37 | | 2048 | 192 | 3764220.00 | 0.56 | 12.25 | 26.51 | 5.04 | 56.14 | 0.24 | 0.00 | 100.51 | 103.64 | 104.54 | 107.29 | 100.76 | | 2048 | 200 | 3538940.00 | 0.58 | 10.53 | 34.43 | 4.16 | 55.98 | 0.26 | 0.00 | 101.11 | 130.28 | 133.07 | 139.67 | 105.94 | | 2048 | 208 | 3535410.00 | 0.63 | 10.26 | 39.10 | 4.42 | 57.79 | 0.26 | 0.00 | 104.99 | 137.09 | 138.30 | 139.86 | 112.48 | | 2048 | 216 | 3538940.00 | 0.58 | 13.14 | 40.62 | 5.04 | 55.45 | 0.28 | 0.00 | 106.08 | 135.93 | 137.98 | 138.84 | 115.12 | | 2048 | 224 | 3407870.00 | 0.70 | 12.87 | 46.33 | 4.61 | 57.95 | 0.26 | 0.00 | 130.57 | 142.24 | 143.32 | 147.15 | 122.72 | | 2048 | 232 | 3670020.00 | 0.54 | 14.55 | 46.11 | 4.51 | 57.72 | 0.25 | 0.00 | 131.59 | 138.97 | 139.92 | 141.23 | 123.68 | | 2048 | 240 | 3565570.00 | 0.56 | 13.52 | 51.26 | 4.62 | 56.97 | 0.25 | 0.00 | 134.50 | 138.74 | 140.46 | 143.67 | 127.18 | | 2048 | 248 | 3670020.00 | 0.63 | 17.72 | 50.87 | 4.79 | 58.02 | 0.27 | 0.00 | 135.65 | 139.44 | 140.59 | 142.06 | 132.28 | | 2048 | 256 | 3670020.00 | 0.60 | 12.77 | 61.03 | 4.50 | 57.72 | 0.27 | 0.00 | 135.72 | 142.43 | 143.26 | 145.82 | 136.88 | </details> #### Online: NVIDIA A30, NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA A30 | | Backend |NVIDIA TensorRT | | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA TensorRT | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | NVIDIA TensorRT Capture CUDA Graph | Enabled | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_a30_experiment_10_triton_performance_online_10/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 2048 | 8 | 3377150.00 | 0.46 | 2.17 | 0.48 | 0.42 | 1.30 | 0.01 | 0.00 | 4.86 | 5.62 | 6.05 | 6.97 | 4.84 | | 2048 | 16 | 4280320.00 | 0.46 | 2.99 | 0.95 | 0.70 | 2.47 | 0.02 | 0.00 | 7.60 | 8.85 | 9.48 | 10.04 | 7.59 | | 2048 | 24 | 4155390.00 | 0.46 | 3.78 | 2.06 | 1.18 | 
4.26 | 0.04 | 0.00 | 12.31 | 12.58 | 12.70 | 13.29 | 11.79 | | 2048 | 32 | 4634620.00 | 0.46 | 4.33 | 2.49 | 1.30 | 5.42 | 0.03 | 0.00 | 15.96 | 16.51 | 16.60 | 16.79 | 14.02 | | 2048 | 40 | 4114430.00 | 0.47 | 4.86 | 4.83 | 1.50 | 7.99 | 0.03 | 0.00 | 20.33 | 20.85 | 21.28 | 22.78 | 19.68 | | 2048 | 48 | 4751360.00 | 0.47 | 5.56 | 4.41 | 1.65 | 8.20 | 0.03 | 0.00 | 20.72 | 21.29 | 21.86 | 27.29 | 20.33 | | 2048 | 56 | 4876290.00 | 0.47 | 6.45 | 4.78 | 1.79 | 9.54 | 0.04 | 0.00 | 21.37 | 29.68 | 30.19 | 32.92 | 23.07 | | 2048 | 64 | 4403200.00 | 0.50 | 8.18 | 6.24 | 2.32 | 12.24 | 0.06 | 0.00 | 30.45 | 34.45 | 36.88 | 43.85 | 29.54 | | 2048 | 72 | 4696060.00 | 0.49 | 7.73 | 6.31 | 2.75 | 13.64 | 0.06 | 0.00 | 31.34 | 35.32 | 38.51 | 45.91 | 30.99 | | 2048 | 80 | 4929540.00 | 0.53 | 8.75 | 5.59 | 2.72 | 14.38 | 0.08 | 0.00 | 33.10 | 42.91 | 44.74 | 51.17 | 32.06 | | 2048 | 88 | 4378620.00 | 0.50 | 12.86 | 7.76 | 3.36 | 15.10 | 0.26 | 0.00 | 43.88 | 49.60 | 51.20 | 56.70 | 39.84 | | 2048 | 96 | 5371900.00 | 0.51 | 7.79 | 6.89 | 3.41 | 17.18 | 0.15 | 0.00 | 36.46 | 48.51 | 53.61 | 59.81 | 35.93 | | 2048 | 104 | 5129210.00 | 0.51 | 10.65 | 9.44 | 3.37 | 16.40 | 0.07 | 0.00 | 42.08 | 48.28 | 52.47 | 57.71 | 40.44 | | 2048 | 112 | 5058560.00 | 0.50 | 9.38 | 10.30 | 3.84 | 19.75 | 0.09 | 0.00 | 44.99 | 57.46 | 58.44 | 59.22 | 43.86 | | 2048 | 120 | 5435390.00 | 0.50 | 12.86 | 10.68 | 3.58 | 16.98 | 0.09 | 0.00 | 45.01 | 50.08 | 50.68 | 63.46 | 44.68 | | 2048 | 128 | 5499520.00 | 0.57 | 9.42 | 11.85 | 4.21 | 20.00 | 0.11 | 0.00 | 45.22 | 58.71 | 61.23 | 71.79 | 46.15 | | 2048 | 136 | 5584900.00 | 0.56 | 7.95 | 14.70 | 4.27 | 21.17 | 0.10 | 0.00 | 52.76 | 59.25 | 61.29 | 66.22 | 48.75 | | 2048 | 144 | 5828610.00 | 0.58 | 8.76 | 14.21 | 4.44 | 21.67 | 0.10 | 0.00 | 53.10 | 60.64 | 62.39 | 65.12 | 49.75 | | 2048 | 152 | 5812220.00 | 0.52 | 12.79 | 13.75 | 4.01 | 21.15 | 0.08 | 0.00 | 54.56 | 60.15 | 62.76 | 67.47 | 52.30 | | 2048 | 160 | 6000640.00 | 0.53 | 13.68 | 13.01 | 4.91 | 21.32 | 0.10 | 0.00 | 55.18 | 62.53 | 63.20 | 70.26 | 53.55 | | 2048 | 168 | 6053890.00 | 0.56 | 11.52 | 15.04 | 4.25 | 22.97 | 0.10 | 0.00 | 57.53 | 65.93 | 67.38 | 73.08 | 54.43 | | 2048 | 176 | 6443010.00 | 0.54 | 10.17 | 16.84 | 4.78 | 22.56 | 0.10 | 0.00 | 56.70 | 66.88 | 68.40 | 74.31 | 54.98 | | 2048 | 184 | 6369280.00 | 0.55 | 11.80 | 17.61 | 4.75 | 22.30 | 0.11 | 0.00 | 59.55 | 69.48 | 72.12 | 75.43 | 57.12 | | 2048 | 192 | 6166530.00 | 0.55 | 13.54 | 19.58 | 5.12 | 22.33 | 0.11 | 0.00 | 62.62 | 73.35 | 75.14 | 78.02 | 61.23 | | 2048 | 200 | 6432770.00 | 0.53 | 12.88 | 20.48 | 4.67 | 23.44 | 0.10 | 0.00 | 63.49 | 75.39 | 76.63 | 82.79 | 62.12 | | 2048 | 208 | 6539260.00 | 0.50 | 17.18 | 18.68 | 3.94 | 22.89 | 0.09 | 0.00 | 64.74 | 73.25 | 73.92 | 75.78 | 63.28 | | 2048 | 216 | 6420200.00 | 0.53 | 14.62 | 23.30 | 3.98 | 24.26 | 0.08 | 0.00 | 71.64 | 76.78 | 79.58 | 81.42 | 66.76 | | 2048 | 224 | 6457340.00 | 0.51 | 13.34 | 26.25 | 4.30 | 23.93 | 0.08 | 0.00 | 73.35 | 76.42 | 78.63 | 81.02 | 68.41 | | 2048 | 232 | 6793220.00 | 0.60 | 12.23 | 25.87 | 4.19 | 24.82 | 0.09 | 0.00 | 72.37 | 76.42 | 79.96 | 82.30 | 67.80 | | 2048 | 240 | 6778880.00 | 0.51 | 16.46 | 23.31 | 4.16 | 24.70 | 0.09 | 0.00 | 72.48 | 76.24 | 77.42 | 81.06 | 69.23 | | 2048 | 248 | 6877180.00 | 0.51 | 14.99 | 25.03 | 4.06 | 25.86 | 0.09 | 0.00 | 72.49 | 74.72 | 75.13 | 76.35 | 70.53 | | 2048 | 256 | 7071740.00 | 0.51 | 14.85 | 26.94 | 3.84 | 25.88 | 0.09 | 0.00 | 72.08 | 74.62 | 75.67 | 78.03 | 72.11 | </details> #### Online: NVIDIA DGX-1 (1x V100 
32GB), TensorFlow with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX-1 (1x V100 32GB) | | Backend |TensorFlow | | Backend accelerator |Automatic FP16| | Precision |FP32 | | Model format |TensorFlow SavedModel | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_6_triton_performance_online_6/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 2048 | 8 | 1406980.00 | 1.04 | 2.73 | 1.62 | 0.28 | 5.88 | 0.02 | 0.00 | 12.16 | 13.50 | 13.82 | 14.58 | 11.58 | | 2048 | 16 | 1937410.00 | 1.12 | 3.69 | 3.49 | 0.81 | 7.54 | 0.04 | 0.00 | 17.57 | 18.46 | 18.68 | 19.28 | 16.69 | | 2048 | 24 | 2236420.00 | 1.12 | 5.16 | 4.61 | 0.99 | 9.81 | 0.04 | 0.00 | 22.39 | 23.75 | 24.48 | 25.40 | 21.73 | | 2048 | 32 | 2439170.00 | 1.19 | 6.46 | 5.89 | 1.47 | 11.61 | 0.08 | 0.00 | 27.56 | 28.64 | 29.38 | 30.56 | 26.71 | | 2048 | 40 | 2586620.00 | 1.23 | 6.53 | 5.38 | 1.94 | 15.81 | 0.09 | 0.00 | 31.88 | 34.85 | 35.49 | 41.48 | 30.98 | | 2048 | 48 | 3145730.00 | 1.14 | 5.45 | 4.67 | 1.84 | 17.55 | 0.08 | 0.00 | 30.73 | 32.24 | 32.60 | 33.67 | 30.74 | | 2048 | 56 | 3211260.00 | 1.19 | 6.09 | 5.79 | 2.07 | 19.84 | 0.10 | 0.00 | 35.02 | 36.32 | 36.56 | 39.19 | 35.08 | | 2048 | 64 | 3229700.00 | 1.24 | 7.60 | 5.88 | 2.54 | 22.48 | 0.12 | 0.00 | 39.91 | 40.87 | 41.03 | 41.50 | 39.85 | | 2048 | 72 | 3231740.00 | 1.26 | 7.51 | 7.54 | 3.11 | 24.84 | 0.12 | 0.00 | 44.69 | 45.61 | 46.08 | 47.34 | 44.40 | | 2048 | 80 | 3325950.00 | 1.32 | 7.15 | 9.10 | 3.48 | 27.39 | 0.14 | 0.00 | 48.57 | 49.50 | 49.63 | 49.94 | 48.58 | | 2048 | 88 | 3303420.00 | 1.34 | 8.98 | 9.23 | 3.66 | 29.86 | 0.15 | 0.00 | 53.21 | 54.16 | 54.30 | 54.82 | 53.22 | | 2048 | 96 | 3407870.00 | 1.35 | 9.52 | 9.82 | 3.98 | 31.35 | 0.16 | 0.00 | 56.17 | 57.28 | 57.66 | 58.45 | 56.19 | | 2048 | 104 | 3352580.00 | 1.34 | 10.83 | 10.69 | 4.78 | 33.99 | 0.21 | 0.00 | 61.75 | 63.06 | 63.46 | 63.92 | 61.84 | | 2048 | 112 | 3299330.00 | 1.34 | 9.79 | 13.48 | 4.76 | 36.84 | 0.21 | 0.00 | 66.32 | 67.74 | 68.13 | 68.99 | 66.43 | | 2048 | 120 | 3483650.00 | 1.40 | 10.80 | 13.38 | 5.05 | 37.15 | 0.22 | 0.00 | 67.95 | 69.06 | 69.59 | 70.84 | 68.01 | | 2048 | 128 | 3391490.00 | 1.44 | 12.91 | 14.60 | 5.72 | 40.50 | 0.23 | 0.00 | 74.83 | 80.32 | 85.15 | 87.77 | 75.40 | | 2048 | 136 | 3339000.00 | 1.43 | 11.07 | 18.41 | 5.60 | 42.67 | 0.23 | 0.00 | 78.96 | 81.42 | 82.95 | 83.83 | 79.42 | | 2048 | 144 | 3430400.00 | 1.36 | 13.13 | 15.70 | 6.08 | 45.65 | 0.25 | 0.00 | 81.96 | 83.69 | 84.16 | 85.02 | 82.17 | | 2048 | 152 | 3424260.00 | 1.38 | 
14.29 | 19.05 | 5.81 | 46.75 | 0.25 | 0.00 | 87.30 | 90.17 | 91.32 | 93.17 | 87.54 | | 2048 | 160 | 3522560.00 | 1.34 | 12.27 | 20.53 | 6.77 | 48.22 | 0.33 | 0.00 | 89.83 | 91.81 | 93.60 | 94.84 | 89.47 | | 2048 | 168 | 3475460.00 | 1.34 | 16.24 | 18.55 | 6.26 | 51.10 | 0.33 | 0.00 | 93.67 | 96.58 | 97.13 | 98.62 | 93.82 | | 2048 | 176 | 3352580.00 | 1.42 | 14.59 | 24.17 | 6.50 | 54.21 | 0.29 | 0.00 | 101.40 | 102.82 | 104.31 | 106.19 | 101.17 | | 2048 | 184 | 3391490.00 | 1.39 | 15.43 | 26.57 | 6.64 | 55.30 | 0.29 | 0.00 | 105.87 | 107.14 | 107.83 | 110.67 | 105.62 | | 2048 | 192 | 3291940.00 | 1.34 | 17.09 | 24.89 | 7.73 | 58.10 | 0.38 | 0.00 | 109.48 | 111.30 | 112.27 | 114.72 | 109.53 | | 2048 | 200 | 3407870.00 | 1.35 | 15.22 | 33.43 | 7.71 | 55.65 | 0.40 | 0.00 | 109.84 | 137.25 | 141.99 | 145.01 | 113.76 | | 2048 | 208 | 3276800.00 | 1.33 | 16.02 | 37.77 | 7.48 | 56.28 | 0.41 | 0.00 | 111.49 | 144.69 | 145.60 | 146.99 | 119.30 | | 2048 | 216 | 3403780.00 | 1.35 | 16.62 | 41.90 | 7.68 | 55.20 | 0.42 | 0.00 | 114.31 | 145.71 | 148.51 | 151.30 | 123.17 | | 2048 | 224 | 3407870.00 | 1.34 | 16.04 | 42.59 | 7.21 | 58.50 | 0.34 | 0.00 | 133.67 | 144.04 | 144.44 | 145.45 | 126.03 | | 2048 | 232 | 3538940.00 | 1.28 | 19.49 | 43.11 | 7.58 | 55.67 | 0.40 | 0.00 | 135.89 | 141.84 | 143.25 | 145.40 | 127.54 | | 2048 | 240 | 3407870.00 | 1.32 | 20.15 | 46.31 | 7.00 | 57.81 | 0.32 | 0.00 | 140.71 | 142.56 | 143.03 | 145.84 | 132.92 | | 2048 | 248 | 3538940.00 | 1.35 | 21.56 | 50.58 | 6.84 | 56.74 | 0.32 | 0.00 | 140.91 | 144.90 | 145.50 | 147.79 | 137.40 | | 2048 | 256 | 3407870.00 | 1.36 | 19.44 | 57.60 | 7.14 | 58.67 | 0.35 | 0.00 | 144.46 | 146.25 | 147.18 | 148.69 | 144.56 | </details> #### Online: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX-1 (1x V100 32GB) | | Backend |NVIDIA TensorRT | | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA TensorRT | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | NVIDIA TensorRT Capture CUDA Graph | Enabled | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_10_triton_performance_online_10/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 2048 | 8 | 2048000.00 | 1.06 | 2.99 | 0.85 | 0.66 | 2.39 | 0.02 | 0.00 | 7.68 | 10.34 | 10.43 | 10.81 | 7.97 | | 2048 | 16 | 3145730.00 | 1.09 | 3.97 | 1.30 | 0.74 | 3.20 | 0.02 | 0.00 | 10.40 | 11.76 | 12.62 | 13.54 | 10.33 | | 2048 | 24 | 3498580.00 | 1.08 | 4.65 | 1.85 | 1.07 | 5.31 | 0.03 | 0.00 | 14.55 | 14.74 | 14.80 | 14.96 | 14.00 | | 2048 | 32 | 
3442690.00 | 1.10 | 5.76 | 3.37 | 1.58 | 7.15 | 0.05 | 0.00 | 20.14 | 22.84 | 25.34 | 30.42 | 19.01 | | 2048 | 40 | 3143680.00 | 1.08 | 6.13 | 6.01 | 2.04 | 10.28 | 0.08 | 0.00 | 26.23 | 26.40 | 26.45 | 26.53 | 25.61 | | 2048 | 48 | 3667970.00 | 1.18 | 7.33 | 5.16 | 2.39 | 10.00 | 0.06 | 0.00 | 26.91 | 34.01 | 37.07 | 44.18 | 26.13 | | 2048 | 56 | 3358720.00 | 1.25 | 9.03 | 7.05 | 3.40 | 13.12 | 0.19 | 0.00 | 33.38 | 39.30 | 42.76 | 43.68 | 34.04 | | 2048 | 64 | 3710980.00 | 1.22 | 9.79 | 6.91 | 3.24 | 13.76 | 0.08 | 0.00 | 35.20 | 46.53 | 51.79 | 58.32 | 35.00 | | 2048 | 72 | 3532800.00 | 1.27 | 9.94 | 9.88 | 4.50 | 15.88 | 0.10 | 0.00 | 42.32 | 45.32 | 47.01 | 48.25 | 41.57 | | 2048 | 80 | 3665920.00 | 1.22 | 11.04 | 9.87 | 4.50 | 17.26 | 0.11 | 0.00 | 43.84 | 54.40 | 57.38 | 69.41 | 44.01 | | 2048 | 88 | 3731460.00 | 1.23 | 12.38 | 9.62 | 4.46 | 18.84 | 0.13 | 0.00 | 49.49 | 60.37 | 64.00 | 70.11 | 46.66 | | 2048 | 96 | 3596290.00 | 1.30 | 16.28 | 10.75 | 5.33 | 19.79 | 0.12 | 0.00 | 56.59 | 60.01 | 64.17 | 65.67 | 53.58 | | 2048 | 104 | 4042750.00 | 1.27 | 11.76 | 11.13 | 5.64 | 21.24 | 0.11 | 0.00 | 51.11 | 63.48 | 68.62 | 81.35 | 51.16 | | 2048 | 112 | 4302850.00 | 1.26 | 13.81 | 10.84 | 5.57 | 20.82 | 0.14 | 0.00 | 52.92 | 65.63 | 70.87 | 73.82 | 52.42 | | 2048 | 120 | 4065280.00 | 1.32 | 15.44 | 14.97 | 5.56 | 21.00 | 0.12 | 0.00 | 67.00 | 71.61 | 73.35 | 74.64 | 58.40 | | 2048 | 128 | 4298750.00 | 1.33 | 11.38 | 14.66 | 6.38 | 24.46 | 0.14 | 0.00 | 57.55 | 74.42 | 75.14 | 75.96 | 58.34 | | 2048 | 136 | 4440060.00 | 1.26 | 14.78 | 14.21 | 6.41 | 23.96 | 0.13 | 0.00 | 65.35 | 75.63 | 79.32 | 84.87 | 60.76 | | 2048 | 144 | 4425730.00 | 1.24 | 18.63 | 15.28 | 6.32 | 22.56 | 0.16 | 0.00 | 67.99 | 76.48 | 78.16 | 82.24 | 64.18 | | 2048 | 152 | 4554750.00 | 1.27 | 16.37 | 15.28 | 6.73 | 25.72 | 0.16 | 0.00 | 67.57 | 76.59 | 78.25 | 89.80 | 65.55 | | 2048 | 160 | 4818940.00 | 1.31 | 14.22 | 16.23 | 7.65 | 25.71 | 0.16 | 0.00 | 67.81 | 78.92 | 83.34 | 108.24 | 65.27 | | 2048 | 168 | 4800510.00 | 1.28 | 18.55 | 16.74 | 7.45 | 25.97 | 0.15 | 0.00 | 72.54 | 85.59 | 90.37 | 99.32 | 70.16 | | 2048 | 176 | 4806660.00 | 1.27 | 17.32 | 19.00 | 7.16 | 25.83 | 0.14 | 0.00 | 73.55 | 85.24 | 86.53 | 89.98 | 70.73 | | 2048 | 184 | 4990980.00 | 1.29 | 16.53 | 21.14 | 7.73 | 26.74 | 0.17 | 0.00 | 76.09 | 89.39 | 94.63 | 107.31 | 73.61 | | 2048 | 192 | 4716540.00 | 1.30 | 19.81 | 22.68 | 8.54 | 25.97 | 0.19 | 0.00 | 79.06 | 96.15 | 97.55 | 102.86 | 78.48 | | 2048 | 200 | 5038080.00 | 1.26 | 18.76 | 25.63 | 7.40 | 27.24 | 0.16 | 0.00 | 84.41 | 94.34 | 95.84 | 102.56 | 80.47 | | 2048 | 208 | 4812800.00 | 1.32 | 17.13 | 27.08 | 8.30 | 28.11 | 0.16 | 0.00 | 87.32 | 96.77 | 107.36 | 120.31 | 82.10 | | 2048 | 216 | 4954110.00 | 1.26 | 19.71 | 27.37 | 7.18 | 28.52 | 0.18 | 0.00 | 87.99 | 101.20 | 106.30 | 126.02 | 84.20 | | 2048 | 224 | 5228540.00 | 1.31 | 19.02 | 27.14 | 7.92 | 29.33 | 0.15 | 0.00 | 90.37 | 99.31 | 105.15 | 114.43 | 84.87 | | 2048 | 232 | 5242880.00 | 1.28 | 20.97 | 25.23 | 7.82 | 30.44 | 0.19 | 0.00 | 93.14 | 96.63 | 98.39 | 100.47 | 85.92 | | 2048 | 240 | 5398530.00 | 1.29 | 19.49 | 29.21 | 7.61 | 30.08 | 0.17 | 0.00 | 92.47 | 95.17 | 95.65 | 98.29 | 87.85 | | 2048 | 248 | 5275650.00 | 1.32 | 23.67 | 29.61 | 8.34 | 28.65 | 0.17 | 0.00 | 93.53 | 97.05 | 99.55 | 100.99 | 91.75 | | 2048 | 256 | 5261310.00 | 1.25 | 31.75 | 25.61 | 7.47 | 28.55 | 0.16 | 0.00 | 95.47 | 113.51 | 118.44 | 122.77 | 94.79 | </details> #### Online: NVIDIA DGX A100 (1x A100 80GB), TensorFlow with FP32 Our results were 
obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Backend |TensorFlow | | Backend accelerator |Automatic FP16| | Precision |FP32 | | Model format |TensorFlow SavedModel | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_6_triton_performance_online_6/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 2048 | 8 | 2117630.00 | 0.39 | 1.86 | 1.24 | 0.32 | 3.89 | 0.02 | 0.00 | 7.91 | 9.30 | 9.66 | 10.36 | 7.72 | | 2048 | 16 | 3072000.00 | 0.45 | 2.50 | 2.32 | 0.58 | 4.76 | 0.02 | 0.00 | 10.98 | 12.15 | 12.70 | 13.56 | 10.64 | | 2048 | 24 | 3350530.00 | 0.46 | 3.55 | 3.22 | 0.96 | 6.28 | 0.04 | 0.00 | 15.04 | 16.18 | 16.47 | 17.53 | 14.50 | | 2048 | 32 | 3788800.00 | 0.47 | 3.52 | 3.79 | 1.35 | 7.92 | 0.07 | 0.00 | 17.34 | 19.63 | 19.96 | 21.71 | 17.11 | | 2048 | 40 | 4411390.00 | 0.45 | 4.82 | 3.28 | 1.25 | 8.54 | 0.07 | 0.00 | 18.94 | 21.93 | 22.89 | 25.76 | 18.41 | | 2048 | 48 | 5271550.00 | 0.44 | 3.44 | 3.06 | 1.88 | 9.59 | 0.07 | 0.00 | 18.58 | 19.17 | 19.40 | 19.91 | 18.48 | | 2048 | 56 | 5116930.00 | 0.44 | 5.64 | 3.41 | 1.96 | 10.72 | 0.09 | 0.00 | 21.15 | 27.85 | 29.67 | 35.70 | 22.26 | | 2048 | 64 | 5462700.00 | 0.45 | 4.74 | 3.81 | 2.23 | 12.30 | 0.10 | 0.00 | 23.67 | 24.60 | 24.85 | 25.19 | 23.63 | | 2048 | 72 | 5603330.00 | 0.49 | 4.72 | 4.88 | 2.57 | 13.15 | 0.13 | 0.00 | 26.01 | 26.96 | 27.19 | 27.60 | 25.94 | | 2048 | 80 | 5730300.00 | 0.49 | 5.77 | 4.66 | 2.69 | 14.52 | 0.13 | 0.00 | 28.26 | 28.98 | 29.24 | 29.64 | 28.26 | | 2048 | 88 | 5304320.00 | 0.56 | 6.82 | 6.48 | 3.50 | 15.72 | 0.15 | 0.00 | 32.96 | 41.00 | 43.34 | 50.28 | 33.24 | | 2048 | 96 | 6078460.00 | 0.47 | 7.48 | 5.44 | 3.06 | 15.20 | 0.14 | 0.00 | 30.81 | 40.15 | 41.58 | 45.13 | 31.80 | | 2048 | 104 | 5795840.00 | 0.51 | 7.24 | 6.82 | 3.19 | 17.82 | 0.17 | 0.00 | 36.06 | 44.67 | 48.78 | 50.98 | 35.75 | | 2048 | 112 | 6309890.00 | 0.48 | 8.32 | 6.55 | 3.03 | 17.26 | 0.16 | 0.00 | 35.22 | 40.60 | 45.25 | 54.10 | 35.79 | | 2048 | 120 | 6070350.00 | 0.48 | 7.32 | 8.34 | 4.02 | 19.39 | 0.22 | 0.00 | 39.67 | 52.07 | 55.22 | 62.96 | 39.78 | | 2048 | 128 | 5603330.00 | 0.48 | 11.37 | 9.76 | 3.65 | 19.80 | 0.21 | 0.00 | 45.55 | 56.76 | 57.75 | 60.84 | 45.28 | | 2048 | 136 | 6342660.00 | 0.47 | 10.50 | 7.40 | 3.40 | 20.62 | 0.19 | 0.00 | 42.67 | 43.36 | 43.68 | 44.46 | 42.58 | | 2048 | 144 | 6160380.00 | 0.51 | 9.38 | 9.72 | 3.96 | 22.94 | 0.22 | 0.00 | 47.19 | 50.34 | 53.58 | 62.89 | 46.73 | | 2048 | 152 | 6162430.00 | 0.50 | 9.35 | 11.24 | 4.06 | 24.05 | 0.22 | 0.00 | 49.62 | 
50.93 | 51.40 | 52.12 | 49.43 | | 2048 | 160 | 6594560.00 | 0.48 | 9.26 | 10.48 | 4.33 | 23.77 | 0.23 | 0.00 | 48.82 | 49.97 | 50.25 | 51.14 | 48.55 | | 2048 | 168 | 6289410.00 | 0.54 | 8.81 | 14.30 | 4.31 | 25.26 | 0.23 | 0.00 | 53.23 | 54.47 | 54.93 | 64.09 | 53.46 | | 2048 | 176 | 6547460.00 | 0.51 | 9.67 | 13.64 | 4.92 | 24.76 | 0.27 | 0.00 | 54.30 | 56.66 | 58.01 | 60.22 | 53.78 | | 2048 | 184 | 6520830.00 | 0.53 | 9.43 | 14.56 | 4.54 | 27.26 | 0.25 | 0.00 | 57.16 | 59.69 | 60.11 | 60.62 | 56.57 | | 2048 | 192 | 6547460.00 | 0.51 | 9.44 | 16.16 | 4.73 | 27.80 | 0.25 | 0.00 | 58.92 | 59.96 | 60.35 | 62.24 | 58.90 | | 2048 | 200 | 6160380.00 | 0.55 | 9.65 | 23.18 | 6.02 | 25.12 | 0.33 | 0.00 | 62.63 | 79.47 | 81.42 | 83.06 | 64.86 | | 2048 | 208 | 6553600.00 | 0.51 | 7.52 | 23.98 | 5.24 | 25.65 | 0.28 | 0.00 | 59.00 | 77.14 | 77.89 | 79.00 | 63.17 | | 2048 | 216 | 6422530.00 | 0.51 | 9.04 | 23.01 | 4.66 | 27.98 | 0.27 | 0.00 | 59.66 | 77.53 | 77.99 | 78.71 | 65.46 | | 2048 | 224 | 6422530.00 | 0.52 | 9.61 | 24.15 | 4.55 | 28.86 | 0.24 | 0.00 | 70.81 | 78.24 | 78.68 | 80.45 | 67.94 | | 2048 | 232 | 6422530.00 | 0.51 | 9.64 | 28.58 | 4.57 | 28.17 | 0.26 | 0.00 | 78.30 | 79.89 | 80.26 | 81.71 | 71.72 | | 2048 | 240 | 6684670.00 | 0.50 | 11.40 | 26.54 | 4.61 | 27.96 | 0.25 | 0.00 | 74.96 | 77.42 | 79.14 | 80.80 | 71.26 | | 2048 | 248 | 6408190.00 | 0.49 | 12.28 | 29.09 | 4.86 | 28.87 | 0.26 | 0.00 | 77.54 | 81.01 | 82.15 | 82.76 | 75.85 | | 2048 | 256 | 6553600.00 | 0.50 | 10.44 | 32.74 | 4.35 | 29.02 | 0.25 | 0.00 | 77.27 | 78.51 | 78.74 | 80.09 | 77.31 | </details> #### Online: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Backend |NVIDIA TensorRT | | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA TensorRT | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | NVIDIA TensorRT Capture CUDA Graph | Enabled | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_10_triton_performance_online_10/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 2048 | 8 | 3344380.00 | 0.39 | 2.26 | 0.59 | 0.58 | 1.05 | 0.02 | 0.00 | 4.83 | 6.14 | 6.32 | 6.84 | 4.89 | | 2048 | 16 | 5148670.00 | 0.40 | 3.14 | 0.78 | 0.69 | 1.31 | 0.03 | 0.00 | 6.21 | 7.78 | 8.12 | 9.11 | 6.34 | | 2048 | 24 | 6113280.00 | 0.42 | 3.33 | 1.07 | 0.98 | 2.18 | 0.03 | 0.00 | 8.18 | 9.32 | 10.14 | 11.37 | 8.00 | | 2048 | 32 | 6434820.00 | 0.45 | 4.10 | 1.43 | 1.26 | 2.84 | 0.04 | 0.00 | 10.59 | 12.07 | 12.65 | 14.35 | 10.10 | | 2048 | 40 | 6946820.00 
| 0.46 | 4.01 | 2.14 | 1.49 | 3.59 | 0.04 | 0.00 | 12.16 | 14.78 | 15.71 | 17.81 | 11.72 | | 2048 | 48 | 6770690.00 | 0.43 | 5.27 | 2.43 | 1.80 | 4.39 | 0.05 | 0.00 | 14.98 | 16.24 | 16.47 | 19.71 | 14.38 | | 2048 | 56 | 7225340.00 | 0.44 | 6.06 | 2.32 | 2.28 | 4.63 | 0.06 | 0.00 | 16.07 | 18.89 | 20.43 | 22.38 | 15.79 | | 2048 | 64 | 7217150.00 | 0.46 | 6.95 | 2.74 | 2.32 | 5.57 | 0.09 | 0.00 | 18.45 | 22.95 | 24.41 | 29.97 | 18.11 | | 2048 | 72 | 7436290.00 | 0.46 | 6.99 | 3.44 | 2.32 | 6.45 | 0.08 | 0.00 | 21.05 | 25.17 | 27.20 | 32.09 | 19.74 | | 2048 | 80 | 7757820.00 | 0.46 | 7.62 | 3.36 | 2.31 | 6.90 | 0.10 | 0.00 | 21.30 | 27.73 | 29.03 | 32.30 | 20.75 | | 2048 | 88 | 8118270.00 | 0.46 | 6.24 | 4.01 | 3.14 | 8.00 | 0.10 | 0.00 | 21.97 | 30.04 | 32.84 | 35.90 | 21.94 | | 2048 | 96 | 7417860.00 | 0.47 | 9.43 | 3.91 | 3.66 | 8.74 | 0.11 | 0.00 | 27.65 | 28.81 | 29.30 | 29.67 | 26.31 | | 2048 | 104 | 7948290.00 | 0.46 | 10.29 | 3.97 | 3.18 | 8.49 | 0.09 | 0.00 | 29.04 | 32.34 | 33.58 | 35.17 | 26.48 | | 2048 | 112 | 8038400.00 | 0.44 | 9.26 | 5.20 | 3.61 | 9.38 | 0.09 | 0.00 | 30.38 | 35.36 | 36.63 | 40.85 | 28.00 | | 2048 | 120 | 8720380.00 | 0.46 | 8.97 | 5.44 | 3.47 | 9.39 | 0.10 | 0.00 | 29.91 | 34.33 | 36.08 | 38.36 | 27.84 | | 2048 | 128 | 8339460.00 | 0.47 | 11.57 | 5.64 | 3.92 | 9.35 | 0.11 | 0.00 | 33.52 | 38.02 | 39.32 | 42.58 | 31.06 | | 2048 | 136 | 9078780.00 | 0.47 | 11.30 | 5.39 | 3.76 | 9.01 | 0.11 | 0.00 | 32.31 | 34.56 | 34.98 | 36.55 | 30.03 | | 2048 | 144 | 8794110.00 | 0.50 | 10.94 | 7.06 | 4.39 | 9.72 | 0.10 | 0.00 | 37.18 | 41.52 | 42.72 | 45.80 | 32.73 | | 2048 | 152 | 9527300.00 | 0.52 | 9.28 | 7.14 | 4.84 | 10.36 | 0.12 | 0.00 | 32.24 | 43.32 | 46.39 | 49.35 | 32.26 | | 2048 | 160 | 8984580.00 | 0.50 | 13.36 | 7.18 | 4.37 | 10.19 | 0.11 | 0.00 | 38.15 | 45.08 | 48.00 | 54.98 | 35.71 | | 2048 | 168 | 9719810.00 | 0.46 | 14.35 | 5.22 | 4.25 | 10.02 | 0.12 | 0.00 | 39.62 | 40.55 | 40.89 | 42.70 | 34.42 | | 2048 | 176 | 10377200.00 | 0.49 | 10.02 | 7.91 | 4.47 | 10.81 | 0.11 | 0.00 | 35.38 | 43.50 | 45.14 | 47.50 | 33.80 | | 2048 | 184 | 9897980.00 | 0.51 | 12.32 | 8.22 | 5.05 | 10.56 | 0.10 | 0.00 | 37.49 | 46.92 | 48.81 | 51.65 | 36.76 | | 2048 | 192 | 10129400.00 | 0.51 | 12.08 | 9.12 | 5.20 | 10.59 | 0.13 | 0.00 | 39.06 | 46.15 | 47.62 | 50.35 | 37.64 | | 2048 | 200 | 10266600.00 | 0.48 | 13.34 | 9.49 | 4.87 | 10.76 | 0.12 | 0.00 | 40.57 | 48.12 | 50.15 | 54.61 | 39.06 | | 2048 | 208 | 10154000.00 | 0.52 | 15.22 | 9.31 | 5.52 | 10.54 | 0.13 | 0.00 | 43.40 | 48.65 | 50.03 | 54.64 | 41.25 | | 2048 | 216 | 10244100.00 | 0.49 | 14.22 | 11.24 | 5.25 | 10.88 | 0.12 | 0.00 | 44.13 | 49.72 | 52.48 | 56.64 | 42.20 | | 2048 | 224 | 10235900.00 | 0.45 | 18.12 | 9.39 | 5.08 | 10.62 | 0.11 | 0.00 | 45.97 | 53.80 | 55.77 | 59.17 | 43.79 | | 2048 | 232 | 10397700.00 | 0.47 | 17.96 | 10.05 | 5.68 | 10.37 | 0.12 | 0.00 | 46.76 | 57.00 | 59.62 | 63.52 | 44.64 | | 2048 | 240 | 10287100.00 | 0.46 | 21.07 | 9.12 | 5.01 | 10.69 | 0.13 | 0.00 | 47.68 | 58.98 | 60.64 | 63.76 | 46.48 | | 2048 | 248 | 11300900.00 | 0.50 | 12.09 | 14.32 | 5.37 | 11.27 | 0.12 | 0.00 | 44.80 | 46.68 | 47.27 | 49.97 | 43.66 | | 2048 | 256 | 11272200.00 | 0.50 | 11.16 | 16.80 | 5.26 | 11.49 | 0.11 | 0.00 | 45.30 | 47.72 | 49.84 | 56.30 | 45.34 | </details> #### Online: NVIDIA T4, TensorFlow with FP32 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA T4 | | Backend |TensorFlow | | 
Backend accelerator |Automatic FP16| | Precision |FP32 | | Model format |TensorFlow SavedModel | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_t4_experiment_6_triton_performance_online_6/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 2048 | 8 | 865871.00 | 1.10 | 4.48 | 3.04 | 0.57 | 9.61 | 0.04 | 0.00 | 19.84 | 22.06 | 22.56 | 23.29 | 18.84 | | 2048 | 16 | 1089540.00 | 1.09 | 5.22 | 7.21 | 1.12 | 15.25 | 0.06 | 0.00 | 31.43 | 33.89 | 34.91 | 36.03 | 29.95 | | 2048 | 24 | 1099780.00 | 1.31 | 6.78 | 10.59 | 1.88 | 22.41 | 0.10 | 0.00 | 44.68 | 47.61 | 48.06 | 48.61 | 43.06 | | 2048 | 32 | 1171460.00 | 1.37 | 8.07 | 13.38 | 2.46 | 28.96 | 0.12 | 0.00 | 56.02 | 59.78 | 60.17 | 60.71 | 54.35 | | 2048 | 40 | 1325780.00 | 1.40 | 6.04 | 13.19 | 2.44 | 37.20 | 0.12 | 0.00 | 60.64 | 63.12 | 63.94 | 71.32 | 60.39 | | 2048 | 48 | 1376260.00 | 1.39 | 8.23 | 12.70 | 2.71 | 44.43 | 0.14 | 0.00 | 69.42 | 71.25 | 71.74 | 72.17 | 69.59 | | 2048 | 56 | 1376260.00 | 1.44 | 8.59 | 18.14 | 2.68 | 50.12 | 0.14 | 0.00 | 81.22 | 82.90 | 83.64 | 85.12 | 81.11 | | 2048 | 64 | 1368060.00 | 1.51 | 8.70 | 21.25 | 3.35 | 57.52 | 0.18 | 0.00 | 92.50 | 94.70 | 95.23 | 96.06 | 92.51 | | 2048 | 72 | 1372160.00 | 1.51 | 9.72 | 24.49 | 3.77 | 63.79 | 0.19 | 0.00 | 103.07 | 107.19 | 107.84 | 108.11 | 103.48 | | 2048 | 80 | 1310720.00 | 1.38 | 9.70 | 27.25 | 4.10 | 72.40 | 0.22 | 0.00 | 114.95 | 117.67 | 118.11 | 118.94 | 115.04 | | 2048 | 88 | 1308670.00 | 1.58 | 11.56 | 26.68 | 4.21 | 81.20 | 0.25 | 0.00 | 125.08 | 129.18 | 129.83 | 130.91 | 125.48 | | 2048 | 96 | 1347580.00 | 1.65 | 11.22 | 32.70 | 4.69 | 87.01 | 0.27 | 0.00 | 137.81 | 139.51 | 140.49 | 143.02 | 137.55 | | 2048 | 104 | 1347580.00 | 1.69 | 9.35 | 40.72 | 4.42 | 90.71 | 0.25 | 0.00 | 147.06 | 149.22 | 149.70 | 150.16 | 147.15 | | 2048 | 112 | 1314820.00 | 1.67 | 11.60 | 42.33 | 5.27 | 97.35 | 0.28 | 0.00 | 160.13 | 165.58 | 174.67 | 182.71 | 158.50 | | 2048 | 120 | 1259520.00 | 1.68 | 12.02 | 45.84 | 5.43 | 105.70 | 0.30 | 0.00 | 170.64 | 174.06 | 175.21 | 176.62 | 170.98 | | 2048 | 128 | 1318910.00 | 1.80 | 11.93 | 50.38 | 5.84 | 112.15 | 0.32 | 0.00 | 182.70 | 186.44 | 187.30 | 187.74 | 182.42 | | 2048 | 136 | 1314820.00 | 1.70 | 17.22 | 46.92 | 6.63 | 120.48 | 0.44 | 0.00 | 192.88 | 196.29 | 196.85 | 201.14 | 193.39 | | 2048 | 144 | 1311460.00 | 1.68 | 16.08 | 51.66 | 6.63 | 127.27 | 0.39 | 0.00 | 203.93 | 207.14 | 208.27 | 210.94 | 203.72 | | 2048 | 152 | 1267710.00 | 1.66 | 15.52 | 58.86 | 6.65 | 133.29 | 0.38 | 0.00 | 216.69 | 221.59 | 228.32 | 228.91 | 216.36 | | 2048 | 160 | 1200130.00 | 1.67 | 15.44 | 63.33 | 6.73 | 140.23 | 0.38 | 0.00 | 228.08 | 230.84 | 232.18 | 
235.98 | 227.78 | | 2048 | 168 | 1290240.00 | 1.72 | 15.64 | 65.90 | 7.50 | 147.90 | 0.40 | 0.00 | 239.57 | 242.45 | 246.57 | 251.30 | 239.07 | | 2048 | 176 | 1317590.00 | 1.64 | 14.87 | 72.50 | 7.94 | 153.87 | 0.41 | 0.00 | 251.88 | 256.37 | 259.48 | 260.15 | 251.23 | | 2048 | 184 | 1247230.00 | 1.72 | 14.28 | 75.90 | 8.05 | 162.36 | 0.44 | 0.00 | 263.65 | 265.82 | 266.30 | 268.95 | 262.75 | | 2048 | 192 | 1251330.00 | 1.69 | 15.09 | 79.04 | 9.36 | 168.48 | 0.47 | 0.00 | 274.96 | 277.44 | 278.19 | 279.32 | 274.14 | | 2048 | 200 | 1179650.00 | 1.66 | 14.45 | 93.11 | 7.82 | 167.90 | 0.44 | 0.00 | 274.52 | 358.83 | 362.49 | 364.92 | 285.37 | | 2048 | 208 | 1179650.00 | 1.59 | 14.07 | 104.92 | 8.14 | 168.38 | 0.46 | 0.00 | 276.92 | 363.75 | 364.94 | 367.04 | 297.58 | | 2048 | 216 | 1179650.00 | 1.66 | 15.02 | 115.94 | 7.78 | 166.93 | 0.50 | 0.00 | 277.43 | 364.02 | 365.33 | 366.67 | 307.84 | | 2048 | 224 | 1178470.00 | 1.64 | 14.27 | 128.81 | 8.77 | 166.54 | 0.47 | 0.00 | 358.49 | 366.57 | 367.23 | 368.10 | 320.50 | | 2048 | 232 | 1179650.00 | 1.51 | 20.32 | 132.74 | 8.31 | 169.39 | 0.44 | 0.00 | 362.49 | 369.42 | 370.47 | 372.11 | 332.71 | | 2048 | 240 | 1179650.00 | 1.58 | 18.17 | 146.59 | 8.71 | 168.74 | 0.44 | 0.00 | 365.72 | 368.24 | 369.50 | 372.40 | 344.22 | | 2048 | 248 | 1179650.00 | 1.58 | 20.87 | 154.53 | 8.20 | 168.54 | 0.44 | 0.00 | 363.30 | 371.63 | 373.75 | 376.55 | 354.16 | | 2048 | 256 | 1179650.00 | 1.66 | 17.51 | 167.41 | 7.93 | 169.97 | 0.44 | 0.00 | 365.42 | 367.29 | 367.73 | 369.40 | 364.92 | </details> #### Online: NVIDIA T4, NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA T4 | | Backend |NVIDIA TensorRT | | Backend accelerator |-| | Precision |FP16 | | Model format |NVIDIA TensorRT | | Max batch size |131072 | | Number of model instances |2| | Export Format | TensorFlow SavedModel | | NVIDIA TensorRT Capture CUDA Graph | Enabled | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_t4_experiment_10_triton_performance_online_10/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 2048 | 8 | 1689960.00 | 0.82 | 3.48 | 1.44 | 0.82 | 3.02 | 0.06 | 0.00 | 10.52 | 11.08 | 11.28 | 11.82 | 9.64 | | 2048 | 16 | 1585610.00 | 1.10 | 5.57 | 3.93 | 1.59 | 8.20 | 0.06 | 0.00 | 19.40 | 25.78 | 26.49 | 29.85 | 20.45 | | 2048 | 24 | 1564670.00 | 1.43 | 6.54 | 7.94 | 2.13 | 12.68 | 0.07 | 0.00 | 32.02 | 32.82 | 32.97 | 33.25 | 30.80 | | 2048 | 32 | 1525760.00 | 1.55 | 8.59 | 8.88 | 2.97 | 19.30 | 0.08 | 0.00 | 45.15 | 50.58 | 57.80 | 61.77 | 41.38 | | 2048 | 40 | 1583100.00 | 1.55 | 10.34 | 10.41 | 3.47 | 24.54 | 0.09 | 0.00 | 50.92 | 61.64 | 73.60 | 85.84 | 50.41 | 
| 2048 | 48 | 1640450.00 | 1.60 | 10.56 | 13.58 | 4.51 | 28.45 | 0.12 | 0.00 | 61.22 | 74.89 | 86.59 | 91.35 | 58.82 | | 2048 | 56 | 1525760.00 | 1.64 | 13.66 | 10.72 | 4.76 | 40.94 | 0.14 | 0.00 | 78.29 | 90.64 | 91.98 | 97.42 | 71.86 | | 2048 | 64 | 1574910.00 | 1.59 | 12.86 | 13.92 | 6.62 | 46.63 | 0.17 | 0.00 | 84.43 | 91.45 | 112.34 | 125.38 | 81.79 | | 2048 | 72 | 1473090.00 | 1.69 | 15.22 | 20.89 | 6.43 | 48.72 | 0.20 | 0.00 | 95.13 | 120.03 | 122.96 | 124.02 | 93.14 | | 2048 | 80 | 1662980.00 | 1.57 | 17.32 | 21.28 | 6.73 | 46.90 | 0.21 | 0.00 | 95.96 | 132.60 | 135.03 | 148.41 | 94.02 | | 2048 | 88 | 1624060.00 | 1.61 | 16.58 | 24.76 | 7.94 | 50.47 | 0.20 | 0.00 | 101.01 | 137.54 | 140.87 | 143.96 | 101.56 | | 2048 | 96 | 1703940.00 | 1.61 | 17.20 | 25.42 | 7.61 | 54.91 | 0.20 | 0.00 | 110.98 | 135.92 | 151.28 | 165.95 | 106.95 | | 2048 | 104 | 1622020.00 | 1.89 | 17.01 | 41.48 | 7.07 | 53.83 | 0.19 | 0.00 | 122.34 | 135.69 | 146.57 | 168.18 | 121.46 | | 2048 | 112 | 1945600.00 | 1.74 | 13.44 | 28.63 | 7.23 | 60.03 | 0.18 | 0.00 | 111.46 | 142.73 | 151.17 | 171.38 | 111.26 | | 2048 | 120 | 1919100.00 | 1.74 | 13.70 | 32.97 | 7.68 | 61.34 | 0.18 | 0.00 | 115.54 | 146.44 | 149.95 | 170.00 | 117.61 | | 2048 | 128 | 1933310.00 | 1.68 | 15.30 | 38.92 | 7.28 | 61.93 | 0.21 | 0.00 | 127.46 | 148.73 | 167.49 | 180.54 | 125.32 | | 2048 | 136 | 1732920.00 | 1.79 | 16.22 | 52.00 | 9.77 | 65.01 | 0.22 | 0.00 | 161.86 | 173.24 | 173.96 | 174.94 | 145.03 | | 2048 | 144 | 1802240.00 | 1.74 | 19.45 | 55.78 | 8.68 | 67.15 | 0.20 | 0.00 | 162.88 | 172.74 | 173.50 | 177.37 | 153.00 | | 2048 | 152 | 1898500.00 | 1.64 | 16.21 | 58.72 | 8.35 | 68.42 | 0.20 | 0.00 | 163.08 | 172.43 | 173.68 | 178.57 | 153.55 | | 2048 | 160 | 2060290.00 | 1.74 | 15.49 | 51.38 | 10.67 | 68.51 | 0.32 | 0.00 | 163.39 | 174.03 | 175.48 | 176.47 | 148.11 | | 2048 | 168 | 1961980.00 | 1.57 | 22.56 | 58.75 | 10.48 | 68.02 | 0.21 | 0.00 | 166.14 | 177.22 | 180.09 | 182.40 | 161.58 | | 2048 | 176 | 2166780.00 | 1.64 | 14.96 | 45.06 | 10.78 | 81.05 | 0.21 | 0.00 | 136.12 | 200.28 | 201.15 | 204.05 | 153.70 | | 2048 | 184 | 2119680.00 | 1.60 | 18.60 | 57.29 | 9.85 | 80.64 | 0.27 | 0.00 | 171.14 | 213.86 | 218.87 | 249.21 | 168.25 | | 2048 | 192 | 2097150.00 | 1.59 | 15.68 | 56.32 | 10.56 | 82.88 | 0.22 | 0.00 | 194.18 | 201.81 | 202.93 | 206.86 | 167.26 | | 2048 | 200 | 2097150.00 | 1.58 | 17.20 | 61.80 | 10.67 | 82.66 | 0.28 | 0.00 | 197.23 | 214.77 | 220.22 | 223.59 | 174.20 | | 2048 | 208 | 2097150.00 | 1.55 | 15.34 | 70.57 | 11.21 | 81.81 | 0.24 | 0.00 | 198.06 | 220.45 | 222.52 | 224.45 | 180.73 | | 2048 | 216 | 2103300.00 | 1.60 | 16.60 | 76.06 | 10.58 | 82.43 | 0.24 | 0.00 | 199.23 | 223.14 | 224.37 | 225.89 | 187.51 | | 2048 | 224 | 2097150.00 | 1.52 | 16.82 | 81.37 | 9.81 | 82.91 | 0.22 | 0.00 | 210.20 | 220.22 | 220.76 | 221.99 | 192.66 | | 2048 | 232 | 2095060.00 | 1.52 | 17.79 | 88.51 | 10.20 | 82.63 | 0.24 | 0.00 | 218.66 | 222.50 | 223.32 | 227.20 | 200.89 | | 2048 | 240 | 2095060.00 | 1.47 | 18.26 | 93.63 | 10.26 | 82.72 | 0.25 | 0.00 | 219.27 | 222.50 | 223.44 | 226.30 | 206.61 | | 2048 | 248 | 2076670.00 | 1.42 | 25.49 | 95.51 | 11.06 | 81.93 | 0.23 | 0.00 | 221.54 | 224.98 | 227.86 | 232.00 | 215.63 | | 2048 | 256 | 2095060.00 | 1.46 | 17.32 | 109.94 | 10.63 | 82.65 | 0.24 | 0.00 | 222.16 | 225.26 | 226.11 | 229.25 | 222.25 | </details> ## Advanced | Inference runtime | Mnemonic used in scripts | |--------------------|--------------------------| | [TensorFlow 
SavedModel](https://www.tensorflow.org/guide/saved_model) | `tf-savedmodel` | | [TensorFlow TensorRT](https://docs.nvidia.com/deeplearning/frameworks/tf-trt-user-guide/index.html) | `tf-trt` | | [ONNX](https://onnx.ai) | `onnx` | | [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` |

### Step by step deployment process

The commands described below can be used to export, convert, and profile the model.

#### Clone Repository

IMPORTANT: This step is executed on the host computer.

<details>
<summary>Clone Repository Command</summary>

```shell
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/TensorFlow2/Recommendation/WideAndDeep
```

</details>

#### Start Triton Inference Server

Set up the environment on the host computer and start Triton Inference Server.

<details>
<summary>Setup Environment and Start Triton Inference Server Command</summary>

```shell
source ./triton/scripts/setup_environment.sh
./triton/scripts/docker/triton_inference_server.sh
```

</details>

#### Setup Container

Build and run a container that extends the NGC TensorFlow2 container with the Triton Inference Server client libraries and dependencies.

<details>
<summary>Setup Container Command</summary>

Build container:

```shell
./triton/scripts/docker/build.sh
```

Run container in interactive mode:

```shell
./triton/scripts/docker/interactive.sh
```

Set up the environment to share artifacts between steps and with Triton Inference Server:

```shell
source ./triton/scripts/setup_environment.sh
```

</details>

#### Export Model

Export the model from its Python source to the desired format (e.g. TensorFlow SavedModel or TorchScript).

<details>
<summary>Export Model Command</summary>

```shell
python3 triton/export_model.py \
    --input-path triton/model.py \
    --input-type tf-keras \
    --output-path ${SHARED_DIR}/exported_model.savedmodel \
    --output-type tf-savedmodel \
    --ignore-unknown-parameters \
    \
    --checkpoint-dir ${CHECKPOINTS_DIR}/widedeep_tf2_amp_base_128k_nvtabular/checkpoint \
    --batch-size 131072 \
    --precision fp32 \
    \
    --dataloader triton/dataloader.py \
    --batch-size 131072 \
    --data-pattern "${DATASETS_DIR}/outbrain/valid/*.parquet"
```

</details>

#### Convert Model

Convert the model from training to inference format (e.g. TensorRT).

<details>
<summary>Convert Model Command</summary>

```shell
model-navigator convert \
    --model-name WidenDeep \
    --model-path ${SHARED_DIR}/exported_model.savedmodel \
    --output-path ${SHARED_DIR}/converted_model \
    --target-formats tf-savedmodel \
    --target-precisions fp32 \
    --launch-mode local \
    --override-workspace \
    --verbose \
    \
    --onnx-opsets 13 \
    --max-batch-size 131072 \
    --max-workspace-size 8589934592 \
    --atol wide_deep_model=0.015 \
    --rtol wide_deep_model=12.0
```

</details>

#### Deploy Model

Configure the model on Triton Inference Server and generate its configuration in the model repository.

<details>
<summary>Deploy Model Command</summary>

```shell
model-navigator triton-config-model \
    --model-repository ${MODEL_REPOSITORY_PATH} \
    --model-name WidenDeep \
    --model-version 1 \
    --model-path ${SHARED_DIR}/converted_model \
    --model-format tf-savedmodel \
    --model-control-mode explicit \
    --load-model \
    --load-model-timeout-s 120 \
    --verbose \
    \
    --batching dynamic \
    --backend-accelerator amp \
    --tensorrt-precision fp32 \
    --tensorrt-capture-cuda-graph \
    --max-batch-size 131072 \
    --preferred-batch-sizes 131072 \
    --engine-count-per-device gpu=2
```

</details>
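After the model is loaded, you can optionally send a single request from the container to verify the deployment before running the full performance tests. The snippet below is an illustrative sketch rather than part of the repository's tooling: it assumes the `tritonclient[http]` Python package is installed, that the Triton HTTP endpoint is reachable at `localhost:8000`, and that the model was loaded under the name `WidenDeep` used in the deploy command above. Input names, shapes, and datatypes are read from the server-side model metadata instead of being hard-coded, since the Wide & Deep feature inputs are model-specific.

```python
# Illustrative sanity check (not part of the repository scripts).
# Assumptions: tritonclient[http] is installed, Triton listens on localhost:8000,
# and the model is loaded under the name "WidenDeep".
import numpy as np
import tritonclient.http as httpclient

TRITON_URL = "localhost:8000"
MODEL_NAME = "WidenDeep"
BATCH_SIZE = 1024

client = httpclient.InferenceServerClient(url=TRITON_URL)
assert client.is_model_ready(MODEL_NAME), f"model {MODEL_NAME} is not loaded"

# Discover inputs/outputs from the server so that no feature names are hard-coded.
metadata = client.get_model_metadata(MODEL_NAME)
np_dtype = {"FP32": np.float32, "FP16": np.float16, "INT32": np.int32, "INT64": np.int64}

inputs = []
for spec in metadata["inputs"]:
    # Replace the dynamic batch dimension (-1) with the batch size we want to send.
    shape = [BATCH_SIZE if dim == -1 else dim for dim in spec["shape"]]
    # Random values in [0, 1); integer (categorical) inputs therefore all become index 0,
    # which is enough for a smoke test but not representative of real traffic.
    data = np.random.rand(*shape).astype(np_dtype[spec["datatype"]])
    infer_input = httpclient.InferInput(spec["name"], shape, spec["datatype"])
    infer_input.set_data_from_numpy(data)
    inputs.append(infer_input)

response = client.infer(model_name=MODEL_NAME, inputs=inputs)
for spec in metadata["outputs"]:
    print(spec["name"], response.as_numpy(spec["name"]).shape)
```

If the request succeeds, the server-side configuration is in place and the performance tests below can be run against the same model name.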
#### Triton Performance Offline Test

We want to maximize throughput. This scenario assumes that your data is already available for inference, or that it arrives quickly enough to saturate the maximum batch size. Triton Inference Server supports offline scenarios with static batching. Static batching allows inference requests to be served as they are received. The largest improvements to throughput come from increasing the batch size, due to efficiency gains in the GPU with larger batches.

<details>
<summary>Triton Performance Offline Test Command</summary>

```shell
python triton/run_performance_on_triton.py \
    --model-repository ${MODEL_REPOSITORY_PATH} \
    --model-name WidenDeep \
    --input-data random \
    --batch-sizes 1 16384 32768 49152 65536 81920 98304 114688 131072 \
    --concurrency 1 \
    --performance-tool perf_analyzer \
    --measurement-request-count 100 \
    --evaluation-mode offline \
    --warmup \
    --result-path ${SHARED_DIR}/triton_performance_offline.csv
```

</details>

#### Triton Performance Online Test

We want to maximize throughput within latency budget constraints. Dynamic batching is a feature of Triton Inference Server that allows inference requests to be combined by the server, so that a batch is created dynamically. This results in a reduced average latency.

<details>
<summary>Triton Performance Online Test</summary>

```shell
python triton/run_performance_on_triton.py \
    --model-repository ${MODEL_REPOSITORY_PATH} \
    --model-name WidenDeep \
    --input-data random \
    --batch-sizes 2048 \
    --concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
    --performance-tool perf_analyzer \
    --measurement-request-count 500 \
    --evaluation-mode online \
    --warmup \
    --result-path ${SHARED_DIR}/triton_performance_online.csv
```

</details>

### Latency explanation

A typical Triton Inference Server pipeline can be broken down into the following steps:

1. The client serializes the inference request into a message and sends it to the server (Client Send).
2. The message travels over the network from the client to the server (Network).
3. The message arrives at the server and is deserialized (Server Receive).
4. The request is placed in the queue (Server Queue).
5. The request is removed from the queue and computed (Server Compute).
6. The completed request is serialized in a message and sent back to the client (Server Send).
7. The completed message then travels over the network from the server to the client (Network).
8. The completed message is deserialized by the client and processed as a completed inference request (Client Receive).

Generally, for local clients, steps 1-4 and 6-8 only occupy a small fraction of time compared to step 5. In distributed systems and online processing, where the client and server are connected through a network, the send and receive steps can have a noticeable impact on overall processing performance. To help analyze possible bottlenecks, detailed latency charts are presented for the online scenario cases. A short worked example of how the per-step latency columns in the results tables add up to the reported average latency is given after the Known issues section below.

## Release Notes

We’re constantly refining and improving our performance on AI and HPC workloads, even on the same hardware, with frequent updates to our software stack. For our latest performance data, refer to these pages for [AI](https://developer.nvidia.com/deep-learning-performance-training-inference) and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.

### Changelog

May 2022
- Initial release

### Known issues

- There are no known issues with this model.
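As referenced in the Latency explanation section, the per-step columns reported by the performance tool add up, within rounding error, to the reported average latency. The sketch below is purely illustrative: the dictionary keys mirror the table columns, and the numbers are copied from the batch size 1 row of the NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16 offline table above; it is not produced by any script in this repository.

```python
# Decompose the reported average latency into the per-step columns of the results tables.
# Values taken from the DGX-1 (1x V100 32GB), TensorRT FP16 offline table, batch size 1.
components_ms = {
    "Client Send": 0.07,                 # step 1
    "Network+Server Send/Recv": 0.41,    # steps 2-3 and 6-7
    "Server Queue": 0.06,                # step 4
    "Server Compute Input": 0.35,        # step 5: input preparation
    "Server Compute Infer": 0.41,        # step 5: model execution
    "Server Compute Output": 0.03,       # step 5: output handling
    "Client Recv": 0.00,                 # step 8
}

total = sum(components_ms.values())
print(f"Sum of components: {total:.2f} ms")  # ~1.33 ms, matching the reported avg latency
for name, value in components_ms.items():
    print(f"{name:<26} {value:5.2f} ms ({value / total:6.1%})")
```

The same decomposition applies to any row of the offline and online tables; small differences between the sum and the reported average latency come from rounding of the individual columns.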
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
shape_utils_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.utils.shape_utils.""" import numpy as np import tensorflow as tf from object_detection.utils import shape_utils class UtilTest(tf.test.TestCase): def test_pad_tensor_using_integer_input(self): t1 = tf.constant([1], dtype=tf.int32) pad_t1 = shape_utils.pad_tensor(t1, 2) t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32) pad_t2 = shape_utils.pad_tensor(t2, 2) self.assertEqual(2, pad_t1.get_shape()[0]) self.assertEqual(2, pad_t2.get_shape()[0]) with self.test_session() as sess: pad_t1_result, pad_t2_result = sess.run([pad_t1, pad_t2]) self.assertAllEqual([1, 0], pad_t1_result) self.assertAllClose([[0.1, 0.2], [0, 0]], pad_t2_result) def test_pad_tensor_using_tensor_input(self): t1 = tf.constant([1], dtype=tf.int32) pad_t1 = shape_utils.pad_tensor(t1, tf.constant(2)) t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32) pad_t2 = shape_utils.pad_tensor(t2, tf.constant(2)) with self.test_session() as sess: pad_t1_result, pad_t2_result = sess.run([pad_t1, pad_t2]) self.assertAllEqual([1, 0], pad_t1_result) self.assertAllClose([[0.1, 0.2], [0, 0]], pad_t2_result) def test_clip_tensor_using_integer_input(self): t1 = tf.constant([1, 2, 3], dtype=tf.int32) clip_t1 = shape_utils.clip_tensor(t1, 2) t2 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32) clip_t2 = shape_utils.clip_tensor(t2, 2) self.assertEqual(2, clip_t1.get_shape()[0]) self.assertEqual(2, clip_t2.get_shape()[0]) with self.test_session() as sess: clip_t1_result, clip_t2_result = sess.run([clip_t1, clip_t2]) self.assertAllEqual([1, 2], clip_t1_result) self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clip_t2_result) def test_clip_tensor_using_tensor_input(self): t1 = tf.constant([1, 2, 3], dtype=tf.int32) clip_t1 = shape_utils.clip_tensor(t1, tf.constant(2)) t2 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32) clip_t2 = shape_utils.clip_tensor(t2, tf.constant(2)) with self.test_session() as sess: clip_t1_result, clip_t2_result = sess.run([clip_t1, clip_t2]) self.assertAllEqual([1, 2], clip_t1_result) self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clip_t2_result) def test_pad_or_clip_tensor_using_integer_input(self): t1 = tf.constant([1], dtype=tf.int32) tt1 = shape_utils.pad_or_clip_tensor(t1, 2) t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32) tt2 = shape_utils.pad_or_clip_tensor(t2, 2) t3 = tf.constant([1, 2, 3], dtype=tf.int32) tt3 = shape_utils.clip_tensor(t3, 2) t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32) tt4 = shape_utils.clip_tensor(t4, 2) self.assertEqual(2, tt1.get_shape()[0]) self.assertEqual(2, tt2.get_shape()[0]) self.assertEqual(2, tt3.get_shape()[0]) self.assertEqual(2, tt4.get_shape()[0]) with self.test_session() as sess: tt1_result, tt2_result, tt3_result, tt4_result = sess.run( [tt1, tt2, tt3, tt4]) self.assertAllEqual([1, 0], tt1_result) self.assertAllClose([[0.1, 
0.2], [0, 0]], tt2_result) self.assertAllEqual([1, 2], tt3_result) self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result) def test_pad_or_clip_tensor_using_tensor_input(self): t1 = tf.constant([1], dtype=tf.int32) tt1 = shape_utils.pad_or_clip_tensor(t1, tf.constant(2)) t2 = tf.constant([[0.1, 0.2]], dtype=tf.float32) tt2 = shape_utils.pad_or_clip_tensor(t2, tf.constant(2)) t3 = tf.constant([1, 2, 3], dtype=tf.int32) tt3 = shape_utils.clip_tensor(t3, tf.constant(2)) t4 = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]], dtype=tf.float32) tt4 = shape_utils.clip_tensor(t4, tf.constant(2)) with self.test_session() as sess: tt1_result, tt2_result, tt3_result, tt4_result = sess.run( [tt1, tt2, tt3, tt4]) self.assertAllEqual([1, 0], tt1_result) self.assertAllClose([[0.1, 0.2], [0, 0]], tt2_result) self.assertAllEqual([1, 2], tt3_result) self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], tt4_result) def test_combines_static_dynamic_shape(self): tensor = tf.placeholder(tf.float32, shape=(None, 2, 3)) combined_shape = shape_utils.combined_static_and_dynamic_shape( tensor) self.assertTrue(tf.contrib.framework.is_tensor(combined_shape[0])) self.assertListEqual(combined_shape[1:], [2, 3]) def test_pad_or_clip_nd_tensor(self): tensor_placeholder = tf.placeholder(tf.float32, [None, 5, 4, 7]) output_tensor = shape_utils.pad_or_clip_nd( tensor_placeholder, [None, 3, 5, tf.constant(6)]) self.assertAllEqual(output_tensor.shape.as_list(), [None, 3, 5, None]) with self.test_session() as sess: output_tensor_np = sess.run( output_tensor, feed_dict={ tensor_placeholder: np.random.rand(2, 5, 4, 7), }) self.assertAllEqual(output_tensor_np.shape, [2, 3, 5, 6]) class StaticOrDynamicMapFnTest(tf.test.TestCase): def test_with_dynamic_shape(self): def fn(input_tensor): return tf.reduce_sum(input_tensor) input_tensor = tf.placeholder(tf.float32, shape=(None, 2)) map_fn_output = shape_utils.static_or_dynamic_map_fn(fn, input_tensor) op_names = [op.name for op in tf.get_default_graph().get_operations()] self.assertTrue(any(['map' == op_name[:3] for op_name in op_names])) with self.test_session() as sess: result1 = sess.run( map_fn_output, feed_dict={ input_tensor: [[1, 2], [3, 1], [0, 4]]}) result2 = sess.run( map_fn_output, feed_dict={ input_tensor: [[-1, 1], [0, 9]]}) self.assertAllEqual(result1, [3, 4, 4]) self.assertAllEqual(result2, [0, 9]) def test_with_static_shape(self): def fn(input_tensor): return tf.reduce_sum(input_tensor) input_tensor = tf.constant([[1, 2], [3, 1], [0, 4]], dtype=tf.float32) map_fn_output = shape_utils.static_or_dynamic_map_fn(fn, input_tensor) op_names = [op.name for op in tf.get_default_graph().get_operations()] self.assertTrue(all(['map' != op_name[:3] for op_name in op_names])) with self.test_session() as sess: result = sess.run(map_fn_output) self.assertAllEqual(result, [3, 4, 4]) def test_with_multiple_dynamic_shapes(self): def fn(elems): input_tensor, scalar_index_tensor = elems return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), []) input_tensor = tf.placeholder(tf.float32, shape=(None, 3)) scalar_index_tensor = tf.placeholder(tf.int32, shape=(None, 1)) map_fn_output = shape_utils.static_or_dynamic_map_fn( fn, [input_tensor, scalar_index_tensor], dtype=tf.float32) op_names = [op.name for op in tf.get_default_graph().get_operations()] self.assertTrue(any(['map' == op_name[:3] for op_name in op_names])) with self.test_session() as sess: result1 = sess.run( map_fn_output, feed_dict={ input_tensor: [[1, 2, 3], [4, 5, -1], [0, 6, 9]], scalar_index_tensor: [[0], [2], [1]], }) 
result2 = sess.run( map_fn_output, feed_dict={ input_tensor: [[-1, 1, 0], [3, 9, 30]], scalar_index_tensor: [[1], [0]] }) self.assertAllEqual(result1, [1, -1, 6]) self.assertAllEqual(result2, [1, 3]) def test_with_multiple_static_shapes(self): def fn(elems): input_tensor, scalar_index_tensor = elems return tf.reshape(tf.slice(input_tensor, scalar_index_tensor, [1]), []) input_tensor = tf.constant([[1, 2, 3], [4, 5, -1], [0, 6, 9]], dtype=tf.float32) scalar_index_tensor = tf.constant([[0], [2], [1]], dtype=tf.int32) map_fn_output = shape_utils.static_or_dynamic_map_fn( fn, [input_tensor, scalar_index_tensor], dtype=tf.float32) op_names = [op.name for op in tf.get_default_graph().get_operations()] self.assertTrue(all(['map' != op_name[:3] for op_name in op_names])) with self.test_session() as sess: result = sess.run(map_fn_output) self.assertAllEqual(result, [1, -1, 6]) def test_fails_with_nested_input(self): def fn(input_tensor): return input_tensor input_tensor1 = tf.constant([1]) input_tensor2 = tf.constant([2]) with self.assertRaisesRegexp( ValueError, '`elems` must be a Tensor or list of Tensors.'): shape_utils.static_or_dynamic_map_fn( fn, [input_tensor1, [input_tensor2]], dtype=tf.float32) class CheckMinImageShapeTest(tf.test.TestCase): def test_check_min_image_dim_static_shape(self): input_tensor = tf.constant(np.zeros([1, 42, 42, 3])) _ = shape_utils.check_min_image_dim(33, input_tensor) with self.assertRaisesRegexp( ValueError, 'image size must be >= 64 in both height and width.'): _ = shape_utils.check_min_image_dim(64, input_tensor) def test_check_min_image_dim_dynamic_shape(self): input_placeholder = tf.placeholder(tf.float32, shape=[1, None, None, 3]) image_tensor = shape_utils.check_min_image_dim(33, input_placeholder) with self.test_session() as sess: sess.run(image_tensor, feed_dict={input_placeholder: np.zeros([1, 42, 42, 3])}) with self.assertRaises(tf.errors.InvalidArgumentError): sess.run(image_tensor, feed_dict={input_placeholder: np.zeros([1, 32, 32, 3])}) class AssertShapeEqualTest(tf.test.TestCase): def test_unequal_static_shape_raises_exception(self): shape_a = tf.constant(np.zeros([4, 2, 2, 1])) shape_b = tf.constant(np.zeros([4, 2, 3, 1])) with self.assertRaisesRegexp( ValueError, 'Unequal shapes'): shape_utils.assert_shape_equal( shape_utils.combined_static_and_dynamic_shape(shape_a), shape_utils.combined_static_and_dynamic_shape(shape_b)) def test_equal_static_shape_succeeds(self): shape_a = tf.constant(np.zeros([4, 2, 2, 1])) shape_b = tf.constant(np.zeros([4, 2, 2, 1])) with self.test_session() as sess: op = shape_utils.assert_shape_equal( shape_utils.combined_static_and_dynamic_shape(shape_a), shape_utils.combined_static_and_dynamic_shape(shape_b)) sess.run(op) def test_unequal_dynamic_shape_raises_tf_assert(self): tensor_a = tf.placeholder(tf.float32, shape=[1, None, None, 3]) tensor_b = tf.placeholder(tf.float32, shape=[1, None, None, 3]) op = shape_utils.assert_shape_equal( shape_utils.combined_static_and_dynamic_shape(tensor_a), shape_utils.combined_static_and_dynamic_shape(tensor_b)) with self.test_session() as sess: with self.assertRaises(tf.errors.InvalidArgumentError): sess.run(op, feed_dict={tensor_a: np.zeros([1, 2, 2, 3]), tensor_b: np.zeros([1, 4, 4, 3])}) def test_equal_dynamic_shape_succeeds(self): tensor_a = tf.placeholder(tf.float32, shape=[1, None, None, 3]) tensor_b = tf.placeholder(tf.float32, shape=[1, None, None, 3]) op = shape_utils.assert_shape_equal( shape_utils.combined_static_and_dynamic_shape(tensor_a), 
shape_utils.combined_static_and_dynamic_shape(tensor_b)) with self.test_session() as sess: sess.run(op, feed_dict={tensor_a: np.zeros([1, 2, 2, 3]), tensor_b: np.zeros([1, 2, 2, 3])}) def test_unequal_static_shape_along_first_dim_raises_exception(self): shape_a = tf.constant(np.zeros([4, 2, 2, 1])) shape_b = tf.constant(np.zeros([6, 2, 3, 1])) with self.assertRaisesRegexp( ValueError, 'Unequal first dimension'): shape_utils.assert_shape_equal_along_first_dimension( shape_utils.combined_static_and_dynamic_shape(shape_a), shape_utils.combined_static_and_dynamic_shape(shape_b)) def test_equal_static_shape_along_first_dim_succeeds(self): shape_a = tf.constant(np.zeros([4, 2, 2, 1])) shape_b = tf.constant(np.zeros([4, 7, 2])) with self.test_session() as sess: op = shape_utils.assert_shape_equal_along_first_dimension( shape_utils.combined_static_and_dynamic_shape(shape_a), shape_utils.combined_static_and_dynamic_shape(shape_b)) sess.run(op) def test_unequal_dynamic_shape_along_first_dim_raises_tf_assert(self): tensor_a = tf.placeholder(tf.float32, shape=[None, None, None, 3]) tensor_b = tf.placeholder(tf.float32, shape=[None, None, 3]) op = shape_utils.assert_shape_equal_along_first_dimension( shape_utils.combined_static_and_dynamic_shape(tensor_a), shape_utils.combined_static_and_dynamic_shape(tensor_b)) with self.test_session() as sess: with self.assertRaises(tf.errors.InvalidArgumentError): sess.run(op, feed_dict={tensor_a: np.zeros([1, 2, 2, 3]), tensor_b: np.zeros([2, 4, 3])}) def test_equal_dynamic_shape_along_first_dim_succeeds(self): tensor_a = tf.placeholder(tf.float32, shape=[None, None, None, 3]) tensor_b = tf.placeholder(tf.float32, shape=[None]) op = shape_utils.assert_shape_equal_along_first_dimension( shape_utils.combined_static_and_dynamic_shape(tensor_a), shape_utils.combined_static_and_dynamic_shape(tensor_b)) with self.test_session() as sess: sess.run(op, feed_dict={tensor_a: np.zeros([5, 2, 2, 3]), tensor_b: np.zeros([5])}) if __name__ == '__main__': tf.test.main()
TensorFlow/Classification/ConvNets/se-resnext101-32x4d
se-resnext101-32x4d
README
# SE-ResNext101-32x4d for TensorFlow This repository provides a script and recipe to train the SE-ResNext101-32x4d model to achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA. SE-ResNext101-32x4d model for TensorFlow1 is no longer maintained and will soon become unavailable, please consider PyTorch or TensorFlow2 models as a substitute for your requirements. ## Table Of Contents * [Model overview](#model-overview) * [Model architecture](#model-architecture) * [Default configuration](#default-configuration) * [Optimizer](#optimizer) * [Data augmentation](#data-augmentation) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [Enabling TF32](#enabling-tf32) * [Setup](#setup) * [Requirements](#requirements) * [Quick Start Guide](#quick-start-guide) * [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Parameters](#parameters) * [The `main.py` script](#the-mainpy-script) * [Inference process](#inference-process) * [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Training accuracy: NVIDIA DGX A100 (8x A100 40GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-40gb) * [Training accuracy: NVIDIA DGX-1 (8x V100 16G)](#training-accuracy-nvidia-dgx-1-8x-v100-16g) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX A100 (8x A100 40GB)](#training-performance-nvidia-dgx-a100-8x-a100-40gb) * [Training performance: NVIDIA DGX-1 (8x V100 16G)](#training-performance-nvidia-dgx-1-8x-v100-16g) * [Training performance: NVIDIA DGX-2 (16x V100 32G)](#training-performance-nvidia-dgx-2-16x-v100-32g) * [Training time for 90 Epochs](#training-time-for-90-epochs) * [Training time: NVIDIA DGX A100 (8x A100 40G)](#training-time-nvidia-dgx-a100-8x-a100-40gb) * [Training time: NVIDIA DGX-1 (8x V100 16G)](#training-time-nvidia-dgx-1-8x-v100-16g) * [Training time: NVIDIA DGX-2 (16x V100 32G)](#training-time-nvidia-dgx-2-16x-v100-32g) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA DGX A100 (1x A100 40GB)](#inference-performance-nvidia-dgx-a100-1x-a100-40gb) * [Inference performance: NVIDIA DGX-1 (1x V100 16G)](#inference-performance-nvidia-dgx-1-1x-v100-16g) * [Inference performance: NVIDIA DGX-2 (1x V100 32G)](#inference-performance-nvidia-dgx-2-1x-v100-32g) * [Inference performance: NVIDIA T4 (1x T4 16G)](#inference-performance-nvidia-t4-1x-t4-16g) * [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview The SE-ResNeXt101-32x4d is a [ResNeXt101-32x4d](https://arxiv.org/pdf/1611.05431.pdf) model with added Squeeze-and-Excitation module introduced in the [Squeeze-and-Excitation Networks](https://arxiv.org/pdf/1709.01507.pdf) paper. 
The following performance optimizations were implemented in this model: * JIT graph compilation with [XLA](https://www.tensorflow.org/xla) * Multi-GPU training with [Horovod](https://github.com/horovod/horovod) * Automated mixed precision [AMP](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 3x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. ### Model architecture Here is a diagram of the Squeeze and Excitation module architecture for ResNet-type models: ![SEArch](./imgs/SEArch.png) _Image source: [Squeeze-and-Excitation Networks](https://arxiv.org/pdf/1709.01507.pdf)_ This image shows the architecture of the SE block and where it is placed in the ResNet bottleneck block. ### Default configuration The following sections highlight the default configuration for the SE-ResNext101-32x4d model. #### Optimizer This model uses the SGD optimizer with the following hyperparameters: * Momentum (0.875). * Learning rate (LR) = 0.256 for 256 batch size, for other batch sizes we linearly scale the learning rate. * Learning rate schedule - we use cosine LR schedule. * For bigger batch sizes (512 and up) we use linear warmup of the learning rate. during the first 5 epochs according to [Training ImageNet in 1 hour](https://arxiv.org/abs/1706.02677). * Weight decay: 6.103515625e-05 (1/16384). * We do not apply Weight decay on batch norm trainable parameters (gamma/bias). * Label Smoothing: 0.1. * We train for: * 90 Epochs -> 90 epochs is a standard for ResNet family networks. * 250 Epochs -> best possible accuracy. * For 250 epoch training we also use [MixUp regularization](https://arxiv.org/pdf/1710.09412.pdf). #### Data Augmentation This model uses the following data augmentation: * For training: * Normalization. * Random resized crop to 224x224. * Scale from 8% to 100%. * Aspect ratio from 3/4 to 4/3. * Random horizontal flip. * For inference: * Normalization. * Scale to 256x256. * Center crop to 224x224. ### Feature support matrix The following features are supported by this model. | Feature | SE-ResNext101-32x4d Tensorflow | |-----------------------|-------------------------- |Multi-GPU training with [Horovod](https://github.com/horovod/horovod) | Yes | |[NVIDIA DALI](https://docs.nvidia.com/deeplearning/dali/release-notes/index.html) | Yes | |Automatic mixed precision (AMP) | Yes | #### Features Multi-GPU training with Horovod - Our model uses Horovod to implement efficient multi-GPU training with NCCL. For details, refer to the example sources in this repository or the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage). NVIDIA DALI - DALI is a library accelerating data preparation pipeline. To accelerate your input pipeline, you only need to define your data loader with the DALI library. For details, refer to the example sources in this repository or the [DALI documentation](https://docs.nvidia.com/deeplearning/dali/index.html). Automatic mixed precision (AMP) - Computation graph can be modified by TensorFlow on runtime to support mixed precision training. Detailed explanation of mixed precision can be found in the next section. 
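Before moving on to mixed precision, here is a minimal sketch that summarizes the learning-rate recipe from the Optimizer section above. It is only an illustration under the stated hyperparameters, not the code used by the training scripts, and it assumes the global (all-GPU) batch size:

```python
# Illustrative sketch of the learning-rate recipe above: linear scaling with the
# global batch size, linear warmup for batch sizes of 512 and up, cosine decay to zero.
import math

def learning_rate(step, steps_per_epoch, global_batch_size, total_epochs=90,
                  base_lr=0.256, base_batch_size=256, warmup_epochs=5):
    # LR = 0.256 for batch size 256; other batch sizes are scaled linearly.
    peak_lr = base_lr * global_batch_size / base_batch_size
    total_steps = total_epochs * steps_per_epoch
    # Linear warmup is only used for larger batch sizes (512 and up).
    warmup_steps = warmup_epochs * steps_per_epoch if global_batch_size >= 512 else 0
    if step < warmup_steps:
        return peak_lr * (step + 1) / warmup_steps
    # Cosine decay over the remaining steps.
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    return 0.5 * peak_lr * (1.0 + math.cos(math.pi * min(1.0, progress)))
```

For example, `learning_rate(0, 1000, 768)` returns a small warmup value, while steps past the warmup phase follow the cosine curve down to zero at the end of training.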
### Mixed precision training Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta and Turing GPUs automatically. The TensorFlow framework code makes all necessary model changes internally. In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling. For information about: - How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. - How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide. #### Enabling mixed precision Mixed precision is enabled in TensorFlow by using the Automatic Mixed Precision (TF-AMP) extension which casts variables to half-precision upon retrieval, while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In TensorFlow, loss scaling can be applied statically by using simple multiplication of loss by a constant value or automatically, by TF-AMP. Automatic mixed precision makes all the adjustments internally in TensorFlow, providing two benefits over manual operations. First, programmers need not modify network model code, reducing development and maintenance effort. 
Second, using AMP maintains forward and backward compatibility with all the APIs for defining and running TensorFlow models. To enable mixed precision, you can simply add the values to the environmental variables inside your training script: - Enable TF-AMP graph rewrite: ``` os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1" ``` - Enable Automated Mixed Precision: ``` os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1' ``` #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ## Setup The following section lists the requirements that you need to meet in order to use the SE-ResNext101-32x4d model. ### Requirements This repository contains Dockerfile which extends the TensorFlow NGC container and encapsulates all dependencies. Aside from these dependencies, ensure you have the following software: - [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) - [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) - GPU-based architecture: - [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) - [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/) - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: * [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html), * [Accessing And Pulling From The NGC container registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry), * [Running TensorFlow](https://docs.nvidia.com/deeplearning/frameworks/tensorflow-release-notes/running.html#running). For those unable to use the [TensorFlow NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). ## Quick Start Guide To train your model using mixed precision or TF32 with Tensor Cores or FP32, perform the following steps using the default parameters of the SE-ResNext101-32x4d model on the [ImageNet](http://www.image-net.org/) dataset. For the specifics concerning training and inference, see the [Advanced](#advanced) section. 1. Clone the repository. ``` git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/TensorFlow/Classification/ConvNets ``` 2. Download and preprocess the dataset. The SE-ResNext101-32x4d script operates on ImageNet 1k, a widely popular image classification dataset from the ILSVRC challenge. 
* [Download the images](http://image-net.org/download-images) * Extract the training and validation data: ```bash mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train tar -xvf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done cd .. mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar ``` * Preprocess dataset to TFRecord form using [script](https://github.com/tensorflow/models/blob/archive/research/inception/inception/data/build_imagenet_data.py). Additional metadata from [autors repository](https://github.com/tensorflow/models/tree/archive/research/inception/inception/data) might be required. 3. Build the SE-ResNext101-32x4d TensorFlow NGC container. ```bash docker build . -t nvidia_rn50 ``` 4. Start an interactive session in the NGC container to run training/inference. After you build the container image, you can start an interactive CLI session with ```bash nvidia-docker run --rm -it -v <path to imagenet>:/data/tfrecords --ipc=host nvidia_rn50 ``` 5. (Optional) Create index files to use DALI. To allow proper sharding in a multi-GPU environment, DALI has to create index files for the dataset. To create index files, run inside the container: ```bash bash ./utils/dali_index.sh /data/tfrecords <index file store location> ``` Index files can be created once and then reused. It is highly recommended to save them into a persistent location. 6. Start training. To run training for a standard configuration (as described in [Default configuration](#default-configuration), DGX1V, DGX2V, single GPU, FP16, FP32, 90, and 250 epochs), run one of the scripts in the `se-resnext101-32x4d/training` directory. Ensure ImageNet is mounted in the `/data/tfrecords` directory. For example, to train on DGX-1 for 90 epochs using AMP, run: `bash ./se-resnext101-32x4d/training/DGX1_SE-RNxt101-32x4d_AMP_90E.sh /path/to/result /data` Additionally, features like DALI data preprocessing or TensorFlow XLA can be enabled with following arguments when running those scripts: `bash ./se-resnext101-32x4d/training/DGX1_SE-RNxt101-32x4d_AMP_90E.sh /path/to/result /data/ --xla --dali` 7. Start validation/evaluation. To evaluate the validation dataset located in `/data/tfrecords`, run `main.py` with `--mode=evaluate`. For example: `python main.py --arch=se-resnext101-32x4d --mode=evaluate --data_dir=/data/tfrecords --batch_size <batch size> --model_dir <model location> --results_dir <output location> [--xla] [--amp]` The optional `--xla` and `--amp` flags control XLA and AMP during evaluation. ## Advanced The following sections provide greater details of the dataset, running training and inference, and the training results. 
### Scripts and sample code In the root directory, the most important files are: - `main.py`: the script that controls the logic of training and validation of the ResNet-like models - `Dockerfile`: Instructions for Docker to build a container with the basic set of dependencies to run ResNet like models for image classification - `requirements.txt`: a set of extra Python requirements for running ResNet-like models The `model/` directory contains the following modules used to define ResNet family models: - `resnet.py`: the definition of ResNet, ResNext, and SE-ResNext model - `blocks/conv2d_block.py`: the definition of 2D convolution block - `blocks/resnet_bottleneck_block.py`: the definition of ResNet-like bottleneck block - `layers/*.py`: definitions of specific layers used in the ResNet-like model The `utils/` directory contains the following utility modules: - `cmdline_helper.py`: helper module for command line processing - `data_utils.py`: module defining input data pipelines - `dali_utils.py`: helper module for DALI - `image_processing.py`: image processing and data augmentation functions - `learning_rate.py`: definition of used learning rate schedule - `optimizers.py`: definition of used custom optimizers - `hooks/*.py`: definitions of specific hooks allowing logging of training and inference process The `runtime/` directory contains the following module that define the mechanics of the training process: - `runner.py`: module encapsulating the training, inference and evaluation ### Parameters #### The `main.py` script The script for training and evaluating the ResNext101-32x4d model has a variety of parameters that control these processes. ``` usage: main.py [-h] [--arch {resnet50,resnext101-32x4d,se-resnext101-32x4d}] [--mode {train,train_and_evaluate,evaluate,predict,training_benchmark,inference_benchmark}] [--export_dir EXPORT_DIR] [--to_predict TO_PREDICT] --batch_size BATCH_SIZE [--num_iter NUM_ITER] [--run_iter RUN_ITER] [--iter_unit {epoch,batch}] [--warmup_steps WARMUP_STEPS] [--model_dir MODEL_DIR] [--results_dir RESULTS_DIR] [--log_filename LOG_FILENAME] [--display_every DISPLAY_EVERY] [--seed SEED] [--gpu_memory_fraction GPU_MEMORY_FRACTION] [--gpu_id GPU_ID] [--finetune_checkpoint FINETUNE_CHECKPOINT] [--use_final_conv] [--quant_delay QUANT_DELAY] [--quantize] [--use_qdq] [--symmetric] [--data_dir DATA_DIR] [--data_idx_dir DATA_IDX_DIR] [--dali] [--synthetic_data_size SYNTHETIC_DATA_SIZE] [--lr_init LR_INIT] [--lr_warmup_epochs LR_WARMUP_EPOCHS] [--weight_decay WEIGHT_DECAY] [--weight_init {fan_in,fan_out}] [--momentum MOMENTUM] [--label_smoothing LABEL_SMOOTHING] [--mixup MIXUP] [--cosine_lr] [--xla] [--data_format {NHWC,NCHW}] [--amp] [--static_loss_scale STATIC_LOSS_SCALE] JoC-RN50v1.5-TF optional arguments: -h, --help show this help message and exit. --arch {resnet50,resnext101-32x4d,se-resnext101-32x4d} Architecture of model to run. --mode {train,train_and_evaluate,evaluate,predict,training_benchmark,inference_benchmark} The execution mode of the script. --export_dir EXPORT_DIR Directory in which to write exported SavedModel. --to_predict TO_PREDICT Path to file or directory of files to run prediction on. --batch_size BATCH_SIZE Size of each minibatch per GPU. --num_iter NUM_ITER Number of iterations to run. --run_iter RUN_ITER Number of training iterations to run on single run. --iter_unit {epoch,batch} Unit of iterations. --warmup_steps WARMUP_STEPS Number of steps considered as warmup and not taken into account for performance measurements. 
--model_dir MODEL_DIR Directory in which to write model. If undefined, results dir will be used. --results_dir RESULTS_DIR Directory in which to write training logs, summaries and checkpoints. --log_filename LOG_FILENAME Name of the JSON file to which write the training log. --display_every DISPLAY_EVERY How often (in batches) to print out running information. --seed SEED Random seed. --gpu_memory_fraction GPU_MEMORY_FRACTION Limit memory fraction used by training script for DALI. --gpu_id GPU_ID Specify ID of the target GPU on multi-device platform. Effective only for single-GPU mode. --finetune_checkpoint FINETUNE_CHECKPOINT Path to pre-trained checkpoint which will be used for fine-tuning. --use_final_conv Use convolution operator instead of MLP as last layer. --quant_delay QUANT_DELAY Number of steps to be run before quantization starts to happen. --quantize Quantize weights and activations during training. (Defaults to Assymmetric quantization) --use_qdq Use QDQV3 op instead of FakeQuantWithMinMaxVars op for quantization. QDQv3 does only scaling. --symmetric Quantize weights and activations during training using symmetric quantization. Dataset arguments: --data_dir DATA_DIR Path to dataset in TFRecord format. Files should be named 'train-*' and 'validation-*'. --data_idx_dir DATA_IDX_DIR Path to index files for DALI. Files should be named 'train-*' and 'validation-*'. --dali Enable DALI data input. --synthetic_data_size SYNTHETIC_DATA_SIZE Dimension of image for synthetic dataset. Training arguments: --lr_init LR_INIT Initial value for the learning rate. --lr_warmup_epochs LR_WARMUP_EPOCHS Number of warmup epochs for learning rate schedule. --weight_decay WEIGHT_DECAY Weight Decay scale factor. --weight_init {fan_in,fan_out} Model weight initialization method. --momentum MOMENTUM SGD momentum value for the Momentum optimizer. --label_smoothing LABEL_SMOOTHING The value of label smoothing. --mixup MIXUP The alpha parameter for mixup (if 0 then mixup is not applied). --cosine_lr Use cosine learning rate schedule. Generic optimization arguments: --xla Enable XLA (Accelerated Linear Algebra) computation for improved performance. --data_format {NHWC,NCHW} Data format used to do calculations. --amp Enable Automatic Mixed Precision to speedup computation using tensor cores. Automatic Mixed Precision arguments: --static_loss_scale STATIC_LOSS_SCALE Use static loss scaling in FP32 AMP. ``` ### Inference process To run inference on a single example with a checkpoint and a model script, use: `python main.py --arch=se-resnext101-32x4d --mode predict --model_dir <path to model> --to_predict <path to image> --results_dir <path to results>` The optional `--xla` and `--amp` flags control XLA and AMP during inference. ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Benchmarking The following section shows how to run benchmarks measuring the model performance in training and inference modes. 
#### Training performance benchmark To benchmark the training performance on a specific batch size, run: * For 1 GPU * FP32 / TF32 `python ./main.py --arch=se-resnext101-32x4d --mode=training_benchmark --warmup_steps 200 --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>` * AMP `python ./main.py --arch=se-resnext101-32x4d --mode=training_benchmark --amp --warmup_steps 200 --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>` * For multiple GPUs * FP32 / TF32 `mpiexec --allow-run-as-root --bind-to socket -np <num_gpus> python ./main.py --arch=se-resnext101-32x4d --mode=training_benchmark --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>` * AMP `mpiexec --allow-run-as-root --bind-to socket -np <num_gpus> python ./main.py --arch=se-resnext101-32x4d --mode=training_benchmark --amp --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>` Each of these scripts runs 200 warm-up iterations and measures the first epoch. To control warmup and benchmark length, use the `--warmup_steps`, `--num_iter` and `--iter_unit` flags. Features like XLA or DALI can be controlled with `--xla` and `--dali` flags. For proper throughput reporting the value of `--num_iter` must be greater than `--warmup_steps` value. Suggested batch sizes for training are 96 for mixed precision training and 64 for single precision training per single V100 16 GB. If no `--data_dir=<path to imagenet>` flag is specified then the benchmarks will use a synthetic dataset. The resolution of synthetic images used can be controlled with `--synthetic_data_size` flag. #### Inference performance benchmark To benchmark the inference performance on a specific batch size, run: * FP32 / TF32 `python ./main.py --arch=se-resnext101-32x4d --mode=inference_benchmark --warmup_steps 20 --num_iter 100 --iter_unit batch --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>` * AMP `python ./main.py --arch=se-resnext101-32x4d --mode=inference_benchmark --amp --warmup_steps 20 --num_iter 100 --iter_unit batch --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>` By default, each of these scripts runs 20 warm-up iterations and measures the next 80 iterations. To control warm-up and benchmark length, use the `--warmup_steps`, `--num_iter` and `--iter_unit` flags. If no `--data_dir=<path to imagenet>` flag is specified then the benchmarks will use a synthetic dataset. The benchmark can be automated with the `inference_benchmark.sh` script provided in `se-resnext101-32x4d`, by simply running: `bash ./se-resnext101-32x4d/inference_benchmark.sh <data dir> <data idx dir>` The `<data dir>` parameter refers to the input data directory (by default `/data/tfrecords` inside the container). By default, the benchmark tests the following configurations: **FP32**, **AMP**, **AMP + XLA** with different batch sizes. When the optional directory with the DALI index files `<data idx dir>` is specified, the benchmark executes an additional **DALI + AMP + XLA** configuration. For proper throughput reporting the value of `--num_iter` must be greater than `--warmup_steps` value. For performance benchamrk of raw model, synthetic dataset can be used. To use synthetic dataset, use `--synthetic_data_size` flag instead of `--data_dir` to specify input image size. 
### Results The following sections provide details on how we achieved our performance and accuracy in training and inference. #### Training accuracy results ##### Training accuracy: NVIDIA DGX A100 (8x A100 40GB) Our results were obtained by running the `/se-resnet50v1.5/training/DGXA100_RN50_{PRECISION}_90E.sh` training script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX A100 (8x A100 40GB) GPUs. | Epochs | Batch Size / GPU | Accuracy - TF32 (top1) | Accuracy - mixed precision (top1) | |--------|------------------|-----------------|----------------------------| | 90 | 128 (TF32) / 256 (AMP) | 79.73 | 79.60 | ##### Training accuracy: NVIDIA DGX-1 (8x V100 16G) Our results were obtained by running the `/se-resnext101-32x4d/training/{/DGX1_RNxt101-32x4d_{PRECISION}_{EPOCHS}E.sh` training script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX-1 with (8x V100 16G) GPUs. | Epochs | Batch Size / GPU | Accuracy - FP32 | Accuracy - mixed precision | |--------|------------------|-----------------|----------------------------| | 90 | 64 (FP32) / 96 (AMP) | 79.69 | 79.81 | | 250 | 64 (FP32) / 96 (AMP) | 80.87 | 80.84 | **Example training loss plot** ![TrainingLoss](./imgs/train_loss.png) #### Training performance results ##### Training performance: NVIDIA DGX A100 (8x A100 40GB) Our results were obtained by running the `se-resnext101-32x4d/training/training_perf.sh` benchmark script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX A100 (8x A100 40GB) GPUs. Performance numbers (in images per second) were averaged over an entire training epoch. | GPUs | Batch Size / GPU | Throughput - TF32 + XLA | Throughput - mixed precision + XLA | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 + XLA | Weak scaling - mixed precision + XLA | |----|---------------|---------------|------------------------|-----------------|-----------|-------------------| | 1 | 128 (TF) / 256 (AMP) | 342 img/s | 975 img/s | 2.86x | 1.00x | 1.00x | | 8 | 128 (TF) / 256 (AMP) | 2610 img/s | 7230 img/s | 2.77x | 7.63x | 7.41x | ##### Training performance: NVIDIA DGX-1 (8x V100 16G) Our results were obtained by running the `se-resnext101-32x4d/training/training_perf.sh` benchmark script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX-1 with (8x V100 16G) GPUs. Performance numbers (in images per second) were averaged over an entire training epoch. | GPUs | Batch Size / GPU | Throughput - FP32 + XLA | Throughput - mixed precision + XLA | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 + XLA | Weak scaling - mixed precision + XLA | |----|---------------|---------------|-----------------------|---------------|-----------|-------| | 1 | 64 (FP32) / 96 (AMP) | 152 img/s | 475 img/s | 3.12x | 1.00x | 1.00x | | 8 | 64 (FP32) / 96 (AMP) | 1120 img/s | 3360 img/s | 3.00x | 7.37x | 7.07x | ##### Training performance: NVIDIA DGX-2 (16x V100 32G) Our results were obtained by running the `se-resnext101-32x4d/training/training_perf.sh` benchmark script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX-2 with (16x V100 32G) GPUs. 
Performance numbers (in images per second) were averaged over an entire training epoch. | GPUs | Batch Size / GPU | Throughput - FP32 + XLA | Throughput - mixed precision + XLA | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 + XLA | Weak scaling - mixed precision + XLA | |----|---------------|---------------|-------------------------|-------|--------|--------| | 1 | 64 (FP32) / 96 (AMP) | 158 img/s | 472 img/s | 2.98x | 1.00x | 1.00x | | 16 | 64 (FP32) / 96 (AMP) | 2270 img/s| 6580 img/s | 2.89x | 14.36x | 13.94x | #### Training Time for 90 Epochs ##### Training time: NVIDIA DGX A100 (8x A100 40GB) Our results were estimated based on the [training performance results](#training-performance-nvidia-dgx-a100-8x-a100-40g) on NVIDIA DGX A100 with (8x A100 40G) GPUs. | GPUs | Time to train - mixed precision + XLA | Time to train - TF32 + XLA | |---|--------|---------| | 1 | ~36h | ~102h | | 8 | ~5h | ~14h | ##### Training time: NVIDIA DGX-1 (8x V100 16G) Our results were estimated based on the [training performance results](#training-performance-nvidia-dgx-1-8x-v100-16g) on NVIDIA DGX-1 with (8x V100 16G) GPUs. | GPUs | Time to train - mixed precision + XLA | Time to train - FP32 + XLA | |---|--------|---------| | 1 | ~68h | ~210h | | 8 | ~10h | ~29h | ##### Training time: NVIDIA DGX-2 (16x V100 32G) Our results were estimated based on the [training performance results](#training-performance-nvidia-dgx-2-16x-v100-32g) on NVIDIA DGX-2 with (16x V100 32G) GPUs. | GPUs | Time to train - mixed precision + XLA | Time to train - FP32 + XLA | |----|-------|-------| | 1 | ~68h | ~202h | | 16 | ~5h | ~14h | #### Inference performance results ##### Inference performance: NVIDIA DGX A100 (1x A100 40GB) Our results were obtained by running the `inference_benchmark.sh` inferencing benchmarking script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX A100 with (1x A100 40G) GPU. 
**TF32 Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 95.32 img/s | 10.52 ms | 10.52 ms | 10.55 ms | 11.10 ms | | 2 | 169.59 img/s | 11.82 ms | 11.83 ms | 11.92 ms | 12.56 ms | | 4 | 258.97 img/s | 15.45 ms | 15.70 ms | 15.78 ms | 16.22 ms | | 8 | 355.09 img/s | 22.53 ms | 22.74 ms | 22.84 ms | 23.17 ms | | 16 | 561.11 img/s | 28.52 ms | 28.85 ms | 29.09 ms | 29.50 ms | | 32 | 698.94 img/s | 45.78 ms | 46.36 ms | 46.56 ms | 46.87 ms | | 64 | 751.17 img/s | 85.21 ms | 86.74 ms | 87.27 ms | 87.95 ms | | 128 | 802.64 img/s | 159.47 ms | 160.01 ms | 160.35 ms | 161.42 ms | | 256 | 840.72 img/s | 304.50 ms | 305.87 ms | 306.11 ms | 306.57 ms | **TF32 Inference Latency + XLA** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 92.46 img/s | 10.84 ms | 10.90 ms | 10.96 ms | 11.14 ms | | 2 | 161.55 img/s | 12.40 ms | 12.44 ms | 12.51 ms | 12.62 ms | | 4 | 237.41 img/s | 16.88 ms | 17.54 ms | 17.79 ms | 18.25 ms | | 8 | 358.39 img/s | 22.35 ms | 23.56 ms | 24.29 ms | 25.53 ms | | 16 | 577.33 img/s | 27.72 ms | 28.64 ms | 28.92 ms | 29.22 ms | | 32 | 800.81 img/s | 39.97 ms | 40.93 ms | 41.42 ms | 41.87 ms | | 64 | 921.00 img/s | 69.64 ms | 70.44 ms | 70.90 ms | 79.54 ms | | 128 | 1024.70 img/s | 124.99 ms | 125.70 ms | 126.10 ms | 138.57 ms | | 256 | 1089.80 img/s | 234.90 ms | 236.02 ms | 236.37 ms | 237.26 ms | **Mixed Precision Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 84.06 img/s | 11.92 ms | 11.94 ms | 11.96 ms | 12.08 ms | | 2 | 170.38 img/s | 11.76 ms | 11.82 ms | 11.87 ms | 11.94 ms | | 4 | 336.09 img/s | 11.93 ms | 12.06 ms | 12.17 ms | 12.62 ms | | 8 | 669.91 img/s | 11.94 ms | 12.33 ms | 12.47 ms | 12.88 ms | | 16 | 1119.49 img/s | 14.36 ms | 14.86 ms | 15.11 ms | 16.11 ms | | 32 | 1482.46 img/s | 21.66 ms | 22.04 ms | 22.38 ms | 23.72 ms | | 64 | 1680.85 img/s | 38.09 ms | 39.02 ms | 39.34 ms | 41.02 ms | | 128 | 1728.27 img/s | 74.30 ms | 74.92 ms | 75.22 ms | 75.60 ms | | 256 | 1761.56 img/s | 145.33 ms | 146.54 ms | 146.83 ms | 147.34 ms | **Mixed Precision Inference Latency + XLA** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 74.83 img/s | 13.39 ms | 13.45 ms | 13.49 ms | 13.57 ms | | 2 | 135.28 img/s | 14.81 ms | 14.98 ms | 15.10 ms | 16.19 ms | | 4 | 272.18 img/s | 14.70 ms | 15.07 ms | 15.30 ms | 15.80 ms | | 8 | 517.69 img/s | 15.50 ms | 16.63 ms | 17.05 ms | 18.10 ms | | 16 | 1050.03 img/s | 15.38 ms | 16.84 ms | 17.49 ms | 17.97 ms | | 32 | 1781.06 img/s | 18.27 ms | 19.54 ms | 20.00 ms | 25.94 ms | | 64 | 2551.55 img/s | 25.26 ms | 26.03 ms | 26.62 ms | 29.67 ms | | 128 | 2834.59 img/s | 45.50 ms | 46.85 ms | 47.72 ms | 54.91 ms | | 256 | 3367.18 img/s | 76.03 ms | 77.06 ms | 77.36 ms | 78.13 ms | ##### Inference performance: NVIDIA DGX-1 (1x V100 16G) Our results were obtained by running the `inference_benchmark.sh` inferencing benchmarking script in the [TensorFlow 20.06-tf1-py3 NGC 
container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX-1 with (1x V100 16G) GPU. **FP32 Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 75.72 img/s | 13.25 ms | 13.38 ms | 13.50 ms | 13.66 ms | | 2 | 112.58 img/s | 17.90 ms | 20.74 ms | 20.91 ms | 21.87 ms | | 4 | 191.09 img/s | 20.93 ms | 21.05 ms | 21.09 ms | 21.27 ms | | 8 | 235.39 img/s | 33.98 ms | 34.14 ms | 34.19 ms | 34.28 ms | | 16 | 315.24 img/s | 50.76 ms | 50.96 ms | 51.01 ms | 51.32 ms | | 32 | 376.05 img/s | 85.09 ms | 85.56 ms | 85.71 ms | 86.40 ms | | 64 | 427.39 img/s | 149.84 ms | 150.08 ms | 150.37 ms | 161.87 ms | | 128 | 460.82 img/s | 277.76 ms | 278.97 ms | 279.48 ms | 280.95 ms | **Mixed Precision Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 66.44 img/s | 15.10 ms | 15.17 ms | 15.25 ms | 16.01 ms | | 2 | 132.33 img/s | 15.16 ms | 15.32 ms | 15.37 ms | 15.50 ms | | 4 | 273.84 img/s | 14.63 ms | 15.14 ms | 15.83 ms | 17.38 ms | | 8 | 509.35 img/s | 15.71 ms | 16.10 ms | 16.21 ms | 16.55 ms | | 16 | 770.02 img/s | 20.78 ms | 20.96 ms | 21.03 ms | 21.24 ms | | 32 | 926.46 img/s | 34.55 ms | 34.88 ms | 35.05 ms | 36.32 ms | | 64 | 1039.74 img/s | 61.55 ms | 61.82 ms | 61.99 ms | 62.32 ms | | 128 | 1102.00 img/s | 116.15 ms | 116.62 ms | 116.80 ms | 116.97 ms | **Mixed Precision Inference Latency + XLA** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 58.55 img/s | 17.12 ms | 17.21 ms | 17.28 ms | 17.42 ms | | 2 | 105.00 img/s | 19.10 ms | 19.29 ms | 19.36 ms | 19.67 ms | | 4 | 207.60 img/s | 19.31 ms | 19.59 ms | 19.67 ms | 19.84 ms | | 8 | 413.16 img/s | 19.37 ms | 19.77 ms | 19.87 ms | 20.24 ms | | 16 | 739.12 img/s | 21.80 ms | 24.48 ms | 24.71 ms | 26.93 ms | | 32 | 1196.83 img/s | 26.99 ms | 27.10 ms | 27.49 ms | 28.80 ms | | 64 | 1470.31 img/s | 43.74 ms | 44.02 ms | 44.18 ms | 46.28 ms | | 128 | 1683.63 img/s | 76.03 ms | 77.00 ms | 77.23 ms | 78.15 ms | ##### Inference performance: NVIDIA DGX-2 (1x V100 32G) Our results were obtained by running the `inference_benchmark.sh` inferencing benchmarking script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX-2 with (1x V100 32G) GPU. 
**FP32 Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 71.44 img/s | 14.07 ms | 14.22 ms | 14.43 ms | 16.44 ms | | 2 | 149.68 img/s | 13.43 ms | 13.79 ms | 13.94 ms | 16.63 ms | | 4 | 183.01 img/s | 21.85 ms | 22.12 ms | 22.18 ms | 22.44 ms | | 8 | 220.67 img/s | 36.25 ms | 36.84 ms | 37.17 ms | 37.43 ms | | 16 | 310.27 img/s | 51.57 ms | 51.88 ms | 52.09 ms | 53.37 ms | | 32 | 381.41 img/s | 83.89 ms | 84.30 ms | 84.66 ms | 85.04 ms | | 64 | 440.37 img/s | 145.45 ms | 145.49 ms | 145.86 ms | 147.53 ms | | 128 | 483.84 img/s | 264.54 ms | 265.04 ms | 265.46 ms | 266.43 ms | **Mixed Precision Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 73.06 img/s | 13.74 ms | 14.07 ms | 14.20 ms | 14.35 ms | | 2 | 155.23 img/s | 12.95 ms | 13.13 ms | 13.33 ms | 15.49 ms | | 4 | 303.68 img/s | 13.23 ms | 13.38 ms | 13.46 ms | 14.34 ms | | 8 | 583.43 img/s | 13.72 ms | 13.90 ms | 14.08 ms | 15.47 ms | | 16 | 783.30 img/s | 20.43 ms | 20.66 ms | 21.31 ms | 21.97 ms | | 32 | 932.10 img/s | 34.34 ms | 34.71 ms | 34.81 ms | 35.70 ms | | 64 | 1058.07 img/s | 60.48 ms | 60.75 ms | 60.94 ms | 62.49 ms | | 128 | 1129.65 img/s | 113.30 ms | 113.53 ms | 113.66 ms | 114.81 ms | **Mixed Precision Inference Latency + XLA** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 66.43 img/s | 15.14 ms | 15.24 ms | 15.31 ms | 19.18 ms | | 2 | 122.85 img/s | 16.39 ms | 18.28 ms | 18.45 ms | 20.33 ms | | 4 | 247.80 img/s | 16.14 ms | 16.44 ms | 16.57 ms | 17.24 ms | | 8 | 498.19 img/s | 16.07 ms | 16.26 ms | 16.66 ms | 17.70 ms | | 16 | 831.20 img/s | 19.40 ms | 19.30 ms | 19.39 ms | 25.41 ms | | 32 | 1223.75 img/s | 26.42 ms | 26.31 ms | 26.70 ms | 29.88 ms | | 64 | 1520.64 img/s | 42.09 ms | 42.45 ms | 42.57 ms | 42.84 ms | | 128 | 1739.61 img/s | 73.58 ms | 73.98 ms | 74.17 ms | 74.72 ms | ##### Inference performance: NVIDIA T4 (1x T4 16G) Our results were obtained by running the `inference_benchmark.sh` inferencing benchmarking script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA T4 with (1x T4 16G) GPU. 
**FP32 Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 27.39 img/s | 36.68 ms | 38.85 ms | 39.01 ms | 40.40 ms | | 2 | 44.56 img/s | 44.96 ms | 46.25 ms | 46.92 ms | 48.92 ms | | 4 | 65.11 img/s | 61.43 ms | 62.22 ms | 62.93 ms | 65.01 ms | | 8 | 80.09 img/s | 99.88 ms | 100.34 ms | 100.85 ms | 101.79 ms | | 16 | 93.98 img/s | 170.24 ms | 170.72 ms | 171.27 ms | 171.98 ms | | 32 | 99.86 img/s | 320.42 ms | 320.99 ms | 321.37 ms | 322.28 ms | | 64 | 103.31 img/s | 619.44 ms | 620.08 ms | 620.55 ms | 622.19 ms | | 128 | 105.16 img/s | 1217.18 ms | 1218.09 ms | 1218.59 ms | 1221.16 ms | **Mixed Precision Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 57.21 img/s | 17.57 ms | 18.06 ms | 18.15 ms | 20.74 ms | | 2 | 80.34 img/s | 24.97 ms | 25.38 ms | 25.69 ms | 27.12 ms | | 4 | 115.12 img/s | 34.77 ms | 35.61 ms | 36.74 ms | 37.61 ms | | 8 | 147.51 img/s | 54.24 ms | 54.79 ms | 55.28 ms | 58.25 ms | | 16 | 173.83 img/s | 92.04 ms | 92.50 ms | 93.26 ms | 94.72 ms | | 32 | 182.19 img/s | 175.64 ms | 176.51 ms | 177.44 ms | 178.52 ms | | 64 | 193.20 img/s | 331.25 ms | 332.56 ms | 333.34 ms | 334.58 ms | | 128 | 195.17 img/s | 655.82 ms | 657.24 ms | 658.79 ms | 661.76 ms | **Mixed Precision Inference Latency + XLA** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 46.19 img/s | 21.72 ms | 21.90 ms | 21.93 ms | 23.64 ms | | 2 | 80.98 img/s | 24.77 ms | 24.99 ms | 25.15 ms | 25.63 ms | | 4 | 129.49 img/s | 30.89 ms | 31.26 ms | 31.34 ms | 32.31 ms | | 8 | 156.91 img/s | 51.00 ms | 52.17 ms | 52.51 ms | 53.32 ms | | 16 | 204.45 img/s | 78.26 ms | 79.58 ms | 79.96 ms | 80.44 ms | | 32 | 215.22 img/s | 148.68 ms | 149.63 ms | 150.41 ms | 151.62 ms | | 64 | 235.36 img/s | 272.05 ms | 273.56 ms | 274.33 ms | 275.86 ms | | 128 | 244.45 img/s | 523.62 ms | 525.12 ms | 525.89 ms | 528.42 ms | ## Release notes ### Changelog April 2023 - Ceased maintenance of ConvNets in TensorFlow1 April 2020 - Initial release August 2020 - Updated command line argument names - Added support for syntetic dataset with different image size January 2022 - Added barrier at the end of multiprocess run ### Known issues Performance without XLA enabled is low due to BN + ReLU fusion bug.
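As a closing note on the results above: the time-to-train figures in the Training time tables can be approximated from the measured throughput. The README does not state the exact estimation method, so the sketch below is only a back-of-the-envelope approximation:

```python
# Rough reproduction of the time-to-train estimates from throughput (illustrative only).
IMAGENET_TRAIN_IMAGES = 1_281_167  # ImageNet-1k training-set size

def hours_to_train(images_per_second, epochs=90):
    return epochs * IMAGENET_TRAIN_IMAGES / images_per_second / 3600.0

# Example: 8x A100, mixed precision + XLA at ~7230 img/s -> ~4.4 h of pure compute,
# consistent with the ~5 h listed once data-pipeline and evaluation overheads are added.
print(f"{hours_to_train(7230):.1f} h")
```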
TensorFlow2/LanguageModeling/ELECTRA/data
data
WikicorpusTextFormatting
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import os


class WikicorpusTextFormatting:
    def __init__(self, wiki_path, output_filename, recursive=False):
        self.wiki_path = wiki_path
        self.recursive = recursive
        self.output_filename = output_filename

    # This puts one article per line
    def merge(self):
        with open(self.output_filename, mode='w', newline='\n') as ofile:
            # Iterate over the extracted Wikipedia shards (directories containing wiki_* files).
            for dirname in glob.glob(self.wiki_path + '/*/', recursive=False):
                for filename in glob.glob(dirname + 'wiki_*', recursive=self.recursive):
                    print(filename)
                    article_lines = []
                    article_open = False

                    with open(filename, mode='r', newline='\n') as file:
                        for line in file:
                            if '<doc id=' in line:
                                # Start of a new article.
                                article_open = True
                            elif '</doc>' in line:
                                # End of the article: write it out as a single line,
                                # skipping the first collected line (the title) and
                                # any blank lines.
                                article_open = False
                                for oline in article_lines[1:]:
                                    if oline != '\n':
                                        ofile.write(oline.rstrip() + " ")
                                # Separate articles with a blank line.
                                ofile.write("\n\n")
                                article_lines = []
                            else:
                                if article_open:
                                    article_lines.append(line)
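For context, a minimal usage sketch of the class above; the paths and the import form are assumptions (in the repository this class is normally driven by the data-preparation scripts rather than invoked directly):

```python
# Hypothetical example: wiki_path points at WikiExtractor-style output
# (subdirectories containing files named wiki_00, wiki_01, ...).
from WikicorpusTextFormatting import WikicorpusTextFormatting  # assumed import path

formatter = WikicorpusTextFormatting(
    wiki_path='/workspace/data/extracted/wikicorpus_en',       # assumed location
    output_filename='/workspace/data/formatted/wikicorpus_en_one_article_per_line.txt',
    recursive=True,
)
formatter.merge()  # one article per line; articles separated by a blank line
```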
TensorFlow/Detection/SSD/models/research/object_detection
object_detection
model_lib_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object detection model library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import numpy as np import tensorflow as tf from tensorflow.contrib.tpu.python.tpu import tpu_config from tensorflow.contrib.tpu.python.tpu import tpu_estimator from object_detection import inputs from object_detection import model_hparams from object_detection import model_lib from object_detection.builders import model_builder from object_detection.core import standard_fields as fields from object_detection.utils import config_util # Model for test. Options are: # 'ssd_inception_v2_pets', 'faster_rcnn_resnet50_pets' MODEL_NAME_FOR_TEST = 'ssd_inception_v2_pets' def _get_data_path(): """Returns an absolute path to TFRecord file.""" return os.path.join(tf.resource_loader.get_data_files_path(), 'test_data', 'pets_examples.record') def get_pipeline_config_path(model_name): """Returns path to the local pipeline config file.""" return os.path.join(tf.resource_loader.get_data_files_path(), 'samples', 'configs', model_name + '.config') def _get_labelmap_path(): """Returns an absolute path to label map file.""" return os.path.join(tf.resource_loader.get_data_files_path(), 'data', 'pet_label_map.pbtxt') def _get_configs_for_model(model_name): """Returns configurations for model.""" filename = get_pipeline_config_path(model_name) data_path = _get_data_path() label_map_path = _get_labelmap_path() configs = config_util.get_configs_from_pipeline_file(filename) override_dict = { 'train_input_path': data_path, 'eval_input_path': data_path, 'label_map_path': label_map_path } configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) return configs def _make_initializable_iterator(dataset): """Creates an iterator, and initializes tables. Args: dataset: A `tf.data.Dataset` object. Returns: A `tf.data.Iterator`. 
""" iterator = dataset.make_initializable_iterator() tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer) return iterator class ModelLibTest(tf.test.TestCase): @classmethod def setUpClass(cls): tf.reset_default_graph() def _assert_model_fn_for_train_eval(self, configs, mode, class_agnostic=False): model_config = configs['model'] train_config = configs['train_config'] with tf.Graph().as_default(): if mode == 'train': features, labels = _make_initializable_iterator( inputs.create_train_input_fn(configs['train_config'], configs['train_input_config'], configs['model'])()).get_next() model_mode = tf.estimator.ModeKeys.TRAIN batch_size = train_config.batch_size elif mode == 'eval': features, labels = _make_initializable_iterator( inputs.create_eval_input_fn(configs['eval_config'], configs['eval_input_config'], configs['model'])()).get_next() model_mode = tf.estimator.ModeKeys.EVAL batch_size = 1 elif mode == 'eval_on_train': features, labels = _make_initializable_iterator( inputs.create_eval_input_fn(configs['eval_config'], configs['train_input_config'], configs['model'])()).get_next() model_mode = tf.estimator.ModeKeys.EVAL batch_size = 1 detection_model_fn = functools.partial( model_builder.build, model_config=model_config, is_training=True) hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams) estimator_spec = model_fn(features, labels, model_mode) self.assertIsNotNone(estimator_spec.loss) self.assertIsNotNone(estimator_spec.predictions) if mode == 'eval' or mode == 'eval_on_train': if class_agnostic: self.assertNotIn('detection_classes', estimator_spec.predictions) else: detection_classes = estimator_spec.predictions['detection_classes'] self.assertEqual(batch_size, detection_classes.shape.as_list()[0]) self.assertEqual(tf.float32, detection_classes.dtype) detection_boxes = estimator_spec.predictions['detection_boxes'] detection_scores = estimator_spec.predictions['detection_scores'] num_detections = estimator_spec.predictions['num_detections'] self.assertEqual(batch_size, detection_boxes.shape.as_list()[0]) self.assertEqual(tf.float32, detection_boxes.dtype) self.assertEqual(batch_size, detection_scores.shape.as_list()[0]) self.assertEqual(tf.float32, detection_scores.dtype) self.assertEqual(tf.float32, num_detections.dtype) if mode == 'eval': self.assertIn('Detections_Left_Groundtruth_Right/0', estimator_spec.eval_metric_ops) if model_mode == tf.estimator.ModeKeys.TRAIN: self.assertIsNotNone(estimator_spec.train_op) return estimator_spec def _assert_model_fn_for_predict(self, configs): model_config = configs['model'] with tf.Graph().as_default(): features, _ = _make_initializable_iterator( inputs.create_eval_input_fn(configs['eval_config'], configs['eval_input_config'], configs['model'])()).get_next() detection_model_fn = functools.partial( model_builder.build, model_config=model_config, is_training=False) hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') model_fn = model_lib.create_model_fn(detection_model_fn, configs, hparams) estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT) self.assertIsNone(estimator_spec.loss) self.assertIsNone(estimator_spec.train_op) self.assertIsNotNone(estimator_spec.predictions) self.assertIsNotNone(estimator_spec.export_outputs) self.assertIn(tf.saved_model.signature_constants.PREDICT_METHOD_NAME, estimator_spec.export_outputs) def test_model_fn_in_train_mode(self): """Tests the 
model function in TRAIN mode.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) self._assert_model_fn_for_train_eval(configs, 'train') def test_model_fn_in_train_mode_freeze_all_variables(self): """Tests model_fn TRAIN mode with all variables frozen.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) configs['train_config'].freeze_variables.append('.*') with self.assertRaisesRegexp(ValueError, 'No variables to optimize'): self._assert_model_fn_for_train_eval(configs, 'train') def test_model_fn_in_train_mode_freeze_all_included_variables(self): """Tests model_fn TRAIN mode with all included variables frozen.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) train_config = configs['train_config'] train_config.update_trainable_variables.append('FeatureExtractor') train_config.freeze_variables.append('.*') with self.assertRaisesRegexp(ValueError, 'No variables to optimize'): self._assert_model_fn_for_train_eval(configs, 'train') def test_model_fn_in_train_mode_freeze_box_predictor(self): """Tests model_fn TRAIN mode with FeatureExtractor variables frozen.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) train_config = configs['train_config'] train_config.update_trainable_variables.append('FeatureExtractor') train_config.update_trainable_variables.append('BoxPredictor') train_config.freeze_variables.append('FeatureExtractor') self._assert_model_fn_for_train_eval(configs, 'train') def test_model_fn_in_eval_mode(self): """Tests the model function in EVAL mode.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) self._assert_model_fn_for_train_eval(configs, 'eval') def test_model_fn_in_eval_on_train_mode(self): """Tests the model function in EVAL mode with train data.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) self._assert_model_fn_for_train_eval(configs, 'eval_on_train') def test_model_fn_in_predict_mode(self): """Tests the model function in PREDICT mode.""" configs = _get_configs_for_model(MODEL_NAME_FOR_TEST) self._assert_model_fn_for_predict(configs) def test_create_estimator_and_inputs(self): """Tests that Estimator and input function are constructed correctly.""" run_config = tf.estimator.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) train_steps = 20 train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config, hparams, pipeline_config_path, train_steps=train_steps) estimator = train_and_eval_dict['estimator'] train_steps = train_and_eval_dict['train_steps'] self.assertIsInstance(estimator, tf.estimator.Estimator) self.assertEqual(20, train_steps) self.assertIn('train_input_fn', train_and_eval_dict) self.assertIn('eval_input_fns', train_and_eval_dict) self.assertIn('eval_on_train_input_fn', train_and_eval_dict) def test_create_estimator_with_default_train_eval_steps(self): """Tests that number of train/eval defaults to config values.""" run_config = tf.estimator.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) config_train_steps = configs['train_config'].num_steps train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config, hparams, pipeline_config_path) estimator = train_and_eval_dict['estimator'] train_steps = train_and_eval_dict['train_steps'] self.assertIsInstance(estimator, tf.estimator.Estimator) 
self.assertEqual(config_train_steps, train_steps) def test_create_tpu_estimator_and_inputs(self): """Tests that number of train/eval defaults to config values.""" run_config = tpu_config.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) train_steps = 20 train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config, hparams, pipeline_config_path, train_steps=train_steps, use_tpu_estimator=True) estimator = train_and_eval_dict['estimator'] train_steps = train_and_eval_dict['train_steps'] self.assertIsInstance(estimator, tpu_estimator.TPUEstimator) self.assertEqual(20, train_steps) def test_create_train_and_eval_specs(self): """Tests that `TrainSpec` and `EvalSpec` is created correctly.""" run_config = tf.estimator.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) train_steps = 20 train_and_eval_dict = model_lib.create_estimator_and_inputs( run_config, hparams, pipeline_config_path, train_steps=train_steps) train_input_fn = train_and_eval_dict['train_input_fn'] eval_input_fns = train_and_eval_dict['eval_input_fns'] eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn'] predict_input_fn = train_and_eval_dict['predict_input_fn'] train_steps = train_and_eval_dict['train_steps'] train_spec, eval_specs = model_lib.create_train_and_eval_specs( train_input_fn, eval_input_fns, eval_on_train_input_fn, predict_input_fn, train_steps, eval_on_train_data=True, final_exporter_name='exporter', eval_spec_names=['holdout']) self.assertEqual(train_steps, train_spec.max_steps) self.assertEqual(2, len(eval_specs)) self.assertEqual(None, eval_specs[0].steps) self.assertEqual('holdout', eval_specs[0].name) self.assertEqual('exporter', eval_specs[0].exporters[0].name) self.assertEqual(None, eval_specs[1].steps) self.assertEqual('eval_on_train', eval_specs[1].name) def test_experiment(self): """Tests that the `Experiment` object is constructed correctly.""" run_config = tf.estimator.RunConfig() hparams = model_hparams.create_hparams( hparams_overrides='load_pretrained=false') pipeline_config_path = get_pipeline_config_path(MODEL_NAME_FOR_TEST) experiment = model_lib.populate_experiment( run_config, hparams, pipeline_config_path, train_steps=10, eval_steps=20) self.assertEqual(10, experiment.train_steps) self.assertEqual(None, experiment.eval_steps) class UnbatchTensorsTest(tf.test.TestCase): def test_unbatch_without_unpadding(self): image_placeholder = tf.placeholder(tf.float32, [2, None, None, None]) groundtruth_boxes_placeholder = tf.placeholder(tf.float32, [2, None, None]) groundtruth_classes_placeholder = tf.placeholder(tf.float32, [2, None, None]) groundtruth_weights_placeholder = tf.placeholder(tf.float32, [2, None]) tensor_dict = { fields.InputDataFields.image: image_placeholder, fields.InputDataFields.groundtruth_boxes: groundtruth_boxes_placeholder, fields.InputDataFields.groundtruth_classes: groundtruth_classes_placeholder, fields.InputDataFields.groundtruth_weights: groundtruth_weights_placeholder } unbatched_tensor_dict = model_lib.unstack_batch( tensor_dict, unpad_groundtruth_tensors=False) with self.test_session() as sess: unbatched_tensor_dict_out = sess.run( unbatched_tensor_dict, feed_dict={ image_placeholder: np.random.rand(2, 4, 4, 3).astype(np.float32), groundtruth_boxes_placeholder: np.random.rand(2, 5, 4).astype(np.float32), 
groundtruth_classes_placeholder: np.random.rand(2, 5, 6).astype(np.float32), groundtruth_weights_placeholder: np.random.rand(2, 5).astype(np.float32) }) for image_out in unbatched_tensor_dict_out[fields.InputDataFields.image]: self.assertAllEqual(image_out.shape, [4, 4, 3]) for groundtruth_boxes_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_boxes]: self.assertAllEqual(groundtruth_boxes_out.shape, [5, 4]) for groundtruth_classes_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_classes]: self.assertAllEqual(groundtruth_classes_out.shape, [5, 6]) for groundtruth_weights_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_weights]: self.assertAllEqual(groundtruth_weights_out.shape, [5]) def test_unbatch_and_unpad_groundtruth_tensors(self): image_placeholder = tf.placeholder(tf.float32, [2, None, None, None]) groundtruth_boxes_placeholder = tf.placeholder(tf.float32, [2, 5, None]) groundtruth_classes_placeholder = tf.placeholder(tf.float32, [2, 5, None]) groundtruth_weights_placeholder = tf.placeholder(tf.float32, [2, 5]) num_groundtruth_placeholder = tf.placeholder(tf.int32, [2]) tensor_dict = { fields.InputDataFields.image: image_placeholder, fields.InputDataFields.groundtruth_boxes: groundtruth_boxes_placeholder, fields.InputDataFields.groundtruth_classes: groundtruth_classes_placeholder, fields.InputDataFields.groundtruth_weights: groundtruth_weights_placeholder, fields.InputDataFields.num_groundtruth_boxes: num_groundtruth_placeholder } unbatched_tensor_dict = model_lib.unstack_batch( tensor_dict, unpad_groundtruth_tensors=True) with self.test_session() as sess: unbatched_tensor_dict_out = sess.run( unbatched_tensor_dict, feed_dict={ image_placeholder: np.random.rand(2, 4, 4, 3).astype(np.float32), groundtruth_boxes_placeholder: np.random.rand(2, 5, 4).astype(np.float32), groundtruth_classes_placeholder: np.random.rand(2, 5, 6).astype(np.float32), groundtruth_weights_placeholder: np.random.rand(2, 5).astype(np.float32), num_groundtruth_placeholder: np.array([3, 3], np.int32) }) for image_out in unbatched_tensor_dict_out[fields.InputDataFields.image]: self.assertAllEqual(image_out.shape, [4, 4, 3]) for groundtruth_boxes_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_boxes]: self.assertAllEqual(groundtruth_boxes_out.shape, [3, 4]) for groundtruth_classes_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_classes]: self.assertAllEqual(groundtruth_classes_out.shape, [3, 6]) for groundtruth_weights_out in unbatched_tensor_dict_out[ fields.InputDataFields.groundtruth_weights]: self.assertAllEqual(groundtruth_weights_out.shape, [3]) if __name__ == '__main__': tf.test.main()
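The tests above exercise `model_lib.create_estimator_and_inputs` and `model_lib.create_train_and_eval_specs` directly. As a rough sketch of how these pieces are typically wired together outside of a test (the pipeline config path and model directory below are placeholders):

```python
import tensorflow as tf
from object_detection import model_hparams, model_lib

# Placeholder paths; substitute your own model directory and pipeline config.
run_config = tf.estimator.RunConfig(model_dir='/tmp/ssd_model_dir')
hparams = model_hparams.create_hparams(hparams_overrides='load_pretrained=false')

train_and_eval_dict = model_lib.create_estimator_and_inputs(
    run_config, hparams, '/path/to/pipeline.config', train_steps=20)

train_spec, eval_specs = model_lib.create_train_and_eval_specs(
    train_and_eval_dict['train_input_fn'],
    train_and_eval_dict['eval_input_fns'],
    train_and_eval_dict['eval_on_train_input_fn'],
    train_and_eval_dict['predict_input_fn'],
    train_and_eval_dict['train_steps'],
    eval_on_train_data=False)

# Run training and periodic evaluation with the first eval spec.
tf.estimator.train_and_evaluate(
    train_and_eval_dict['estimator'], train_spec, eval_specs[0])
```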
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp
trtis_cpp
build_trtis
#!/bin/bash ## # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # NVIDIA_VISIBLE_DEVICES="${NVIDIA_VISIBLE_DEVICES:-all}" IMAGE_NAME="trt-tacotron2-waveglow.trtis" CONTAINER_NAME="trt-tacotron2-waveglow.trtis.container" die() { echo "ERROR: ${@}" 1>&2 exit 1 } if [[ $# != 4 && $# != 3 ]]; then echo "Unexpected number of arguments: $#" echo "USAGE:" echo "\t${0} <tacotron2 model> <waveglow model> <denoiser model> [use amp 0/1]" exit 1 fi # remove container if it exists if [[ "$(docker ps -f "name=${CONTAINER_NAME}" -qa | wc -l)" != "0" ]]; then docker rm "${CONTAINER_NAME}" fi TACOTRON2_MODEL="${1}" WAVEGLOW_MODEL="${2}" DENOISER_MODEL="${3}" AMP="${4:-1}" # copy models to build context mkdir -p tmp/ cp -v "${TACOTRON2_MODEL}" tmp/tacotron2.json && TACOTRON2_MODEL="tmp/tacotron2.json" || die "Failed to copy ${TACOTRON2_MODEL}" cp -v "${WAVEGLOW_MODEL}" tmp/waveglow.onnx && WAVEGLOW_MODEL="tmp/waveglow.onnx" || die "Failed to copy ${WAVEGLOW_MODEL}" cp -v "${DENOISER_MODEL}" tmp/denoiser.json && DENOISER_MODEL="tmp/denoiser.json" || die "Failed to copy ${DENOISER_MODEL}" docker build \ --build-arg TACOTRON2_MODEL="${TACOTRON2_MODEL}" \ --build-arg WAVEGLOW_MODEL="${WAVEGLOW_MODEL}" \ --build-arg DENOISER_MODEL="${DENOISER_MODEL}" \ -f Dockerfile.trtis . -t "${IMAGE_NAME}" || die "Failed to build docker container." nvidia-docker run \ -e "NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES}" \ --name "${CONTAINER_NAME}" \ "${IMAGE_NAME}" "./scripts/build_engines.sh" "${AMP}" || die "Failed to build engines." docker commit "${CONTAINER_NAME}" "${IMAGE_NAME}" || die "Failed commit changes." docker rm "${CONTAINER_NAME}"
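A hypothetical invocation of the script above, assuming it has been saved as `build_trtis.sh` in the `trtis_cpp` directory and that the exported Tacotron 2 JSON, WaveGlow ONNX, and denoiser JSON files already exist (the model paths are placeholders); the final argument enables AMP when building the engines:

```bash
# Placeholder model paths; the script copies and renames them into tmp/ itself.
./build_trtis.sh models/tacotron2.json models/waveglow.onnx models/denoiser.json 1
```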
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/utils
utils
box_utils
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Util functions to manipulate boxes.""" from __future__ import absolute_import, division, print_function import numpy as np import tensorflow as tf BBOX_XFORM_CLIP = np.log(1000. / 16.) NMS_TILE_SIZE = 512 def bbox_overlap(boxes, gt_boxes): """Calculates the overlap between proposal and ground truth boxes. Some `gt_boxes` may have been padded. The returned `iou` tensor for these boxes will be -1. Args: boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form. gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This tensor might have paddings with a negative value. Returns: iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES]. """ with tf.name_scope('bbox_overlap'): bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(value=boxes, num_or_size_splits=4, axis=2) gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(value=gt_boxes, num_or_size_splits=4, axis=2) # Calculates the intersection area. i_xmin = tf.maximum(bb_x_min, tf.transpose(a=gt_x_min, perm=[0, 2, 1])) i_xmax = tf.minimum(bb_x_max, tf.transpose(a=gt_x_max, perm=[0, 2, 1])) i_ymin = tf.maximum(bb_y_min, tf.transpose(a=gt_y_min, perm=[0, 2, 1])) i_ymax = tf.minimum(bb_y_max, tf.transpose(a=gt_y_max, perm=[0, 2, 1])) i_area = tf.maximum((i_xmax - i_xmin), 0) * tf.maximum((i_ymax - i_ymin), 0) # Calculates the union area. bb_area = (bb_y_max - bb_y_min) * (bb_x_max - bb_x_min) gt_area = (gt_y_max - gt_y_min) * (gt_x_max - gt_x_min) # Adds a small epsilon to avoid divide-by-zero. u_area = bb_area + tf.transpose(a=gt_area, perm=[0, 2, 1]) - i_area + 1e-8 # Calculates IoU. iou = i_area / u_area # Fills -1 for padded ground truth boxes. padding_mask = tf.less(i_xmin, tf.zeros_like(i_xmin)) iou = tf.where(padding_mask, -tf.ones_like(iou), iou) return iou def top_k(scores, k, boxes_list): """A wrapper that returns top-k scores and correponding boxes. This functions selects the top-k scores and boxes as follows. indices = argsort(scores)[:k] scores = scores[indices] outputs = [] for boxes in boxes_list: outputs.append(boxes[indices, :]) return scores, outputs Args: scores: a tensor with a shape of [batch_size, N]. N is the number of scores. k: an integer for selecting the top-k elements. boxes_list: a list containing at least one element. Each element has a shape of [batch_size, N, 4]. Returns: scores: the selected top-k scores with a shape of [batch_size, k]. outputs: the list containing the corresponding boxes in the order of the input `boxes_list`. 
""" assert isinstance(boxes_list, list) assert boxes_list # not empty list batch_size, _ = scores.get_shape().as_list() scores, top_k_indices = tf.nn.top_k(scores, k=k) outputs = [] for boxes in boxes_list: if batch_size == 1: boxes = tf.squeeze(tf.gather(boxes, top_k_indices, axis=1), axis=1) else: boxes_index_offsets = tf.range(batch_size) * tf.shape(input=boxes)[1] boxes_indices = tf.reshape( top_k_indices + tf.expand_dims(boxes_index_offsets, 1), [-1]) boxes = tf.reshape( tf.gather(tf.reshape(boxes, [-1, 4]), boxes_indices), [batch_size, -1, 4]) outputs.append(boxes) return scores, outputs def _self_suppression(iou, _, iou_sum): batch_size = tf.shape(input=iou)[0] can_suppress_others = tf.cast( tf.reshape(tf.reduce_max(input_tensor=iou, axis=1) <= 0.5, [batch_size, -1, 1]), iou.dtype) iou_suppressed = tf.reshape( tf.cast(tf.reduce_max(input_tensor=can_suppress_others * iou, axis=1) <= 0.5, iou.dtype), [batch_size, -1, 1]) * iou iou_sum_new = tf.reduce_sum(input_tensor=iou_suppressed, axis=[1, 2]) return [ iou_suppressed, tf.reduce_any(input_tensor=iou_sum - iou_sum_new > 0.5), iou_sum_new ] def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx): batch_size = tf.shape(input=boxes)[0] new_slice = tf.slice(boxes, [0, inner_idx * NMS_TILE_SIZE, 0], [batch_size, NMS_TILE_SIZE, 4]) iou = bbox_overlap(new_slice, box_slice) ret_slice = tf.expand_dims( tf.cast(tf.reduce_all(input_tensor=iou < iou_threshold, axis=[1]), box_slice.dtype), 2) * box_slice return boxes, ret_slice, iou_threshold, inner_idx + 1 def _suppression_loop_body(boxes, iou_threshold, output_size, idx): """Process boxes in the range [idx*NMS_TILE_SIZE, (idx+1)*NMS_TILE_SIZE). Args: boxes: a tensor with a shape of [batch_size, anchors, 4]. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. output_size: an int32 tensor of size [batch_size]. Representing the number of selected boxes for each batch. idx: an integer scalar representing induction variable. Returns: boxes: updated boxes. iou_threshold: pass down iou_threshold to the next iteration. output_size: the updated output_size. idx: the updated induction variable. """ num_tiles = tf.shape(input=boxes)[1] // NMS_TILE_SIZE batch_size = tf.shape(input=boxes)[0] # Iterates over tiles that can possibly suppress the current tile. box_slice = tf.slice(boxes, [0, idx * NMS_TILE_SIZE, 0], [batch_size, NMS_TILE_SIZE, 4]) _, box_slice, _, _ = tf.while_loop( cond=lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx, body=_cross_suppression, loop_vars=[boxes, box_slice, iou_threshold, tf.constant(0)]) # Iterates over the current tile to compute self-suppression. iou = bbox_overlap(box_slice, box_slice) mask = tf.expand_dims( tf.reshape(tf.range(NMS_TILE_SIZE), [1, -1]) > tf.reshape( tf.range(NMS_TILE_SIZE), [-1, 1]), 0) iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype) suppressed_iou, _, _ = tf.while_loop( cond=lambda _iou, loop_condition, _iou_sum: loop_condition, body=_self_suppression, loop_vars=[iou, tf.constant(True), tf.reduce_sum(input_tensor=iou, axis=[1, 2])]) suppressed_box = tf.reduce_sum(input_tensor=suppressed_iou, axis=1) > 0 box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2) # Uses box_slice to update the input boxes. 
mask = tf.reshape( tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1]) boxes = tf.tile(tf.expand_dims( box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape( boxes, [batch_size, num_tiles, NMS_TILE_SIZE, 4]) * (1 - mask) boxes = tf.reshape(boxes, [batch_size, -1, 4]) # Updates output_size. output_size += tf.reduce_sum( input_tensor=tf.cast(tf.reduce_any(input_tensor=box_slice > 0, axis=[2]), tf.int32), axis=[1]) return boxes, iou_threshold, output_size, idx + 1 def sorted_non_max_suppression_padded(scores, boxes, max_output_size, iou_threshold): """A wrapper that handles non-maximum suppression. Assumption: * The boxes are sorted by scores unless the box is a dot (all coordinates are zero). * Boxes with higher scores can be used to suppress boxes with lower scores. The overal design of the algorithm is to handle boxes tile-by-tile: boxes = boxes.pad_to_multiply_of(tile_size) num_tiles = len(boxes) // tile_size output_boxes = [] for i in range(num_tiles): box_tile = boxes[i*tile_size : (i+1)*tile_size] for j in range(i - 1): suppressing_tile = boxes[j*tile_size : (j+1)*tile_size] iou = bbox_overlap(box_tile, suppressing_tile) # if the box is suppressed in iou, clear it to a dot box_tile *= _update_boxes(iou) # Iteratively handle the diagnal tile. iou = _box_overlap(box_tile, box_tile) iou_changed = True while iou_changed: # boxes that are not suppressed by anything else suppressing_boxes = _get_suppressing_boxes(iou) # boxes that are suppressed by suppressing_boxes suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes) # clear iou to 0 for boxes that are suppressed, as they cannot be used # to suppress other boxes any more new_iou = _clear_iou(iou, suppressed_boxes) iou_changed = (new_iou != iou) iou = new_iou # remaining boxes that can still suppress others, are selected boxes. output_boxes.append(_get_suppressing_boxes(iou)) if len(output_boxes) >= max_output_size: break Args: scores: a tensor with a shape of [batch_size, anchors]. boxes: a tensor with a shape of [batch_size, anchors, 4]. max_output_size: a scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. Returns: nms_scores: a tensor with a shape of [batch_size, anchors]. It has same dtype as input scores. nms_proposals: a tensor with a shape of [batch_size, anchors, 4]. It has same dtype as input boxes. 
""" batch_size = tf.shape(input=boxes)[0] num_boxes = tf.shape(input=boxes)[1] pad = tf.cast( tf.math.ceil(tf.cast(num_boxes, tf.float32) / NMS_TILE_SIZE), tf.int32) * NMS_TILE_SIZE - num_boxes boxes = tf.pad(tensor=tf.cast(boxes, tf.float32), paddings=[[0, 0], [0, pad], [0, 0]]) scores = tf.pad(tensor=tf.cast(scores, tf.float32), paddings=[[0, 0], [0, pad]]) num_boxes += pad def _loop_cond(unused_boxes, unused_threshold, output_size, idx): return tf.logical_and( tf.reduce_min(input_tensor=output_size) < max_output_size, idx < num_boxes // NMS_TILE_SIZE) selected_boxes, _, output_size, _ = tf.while_loop( cond=_loop_cond, body=_suppression_loop_body, loop_vars=[ boxes, iou_threshold, tf.zeros([batch_size], tf.int32), tf.constant(0) ]) idx = num_boxes - tf.cast( tf.nn.top_k( tf.cast(tf.reduce_any(input_tensor=selected_boxes > 0, axis=[2]), tf.int32) * tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0], tf.int32) idx = tf.minimum(idx, num_boxes - 1) idx = tf.reshape( idx + tf.reshape(tf.range(batch_size) * num_boxes, [-1, 1]), [-1]) boxes = tf.reshape( tf.gather(tf.reshape(boxes, [-1, 4]), idx), [batch_size, max_output_size, 4]) boxes = boxes * tf.cast( tf.reshape(tf.range(max_output_size), [1, -1, 1]) < tf.reshape( output_size, [-1, 1, 1]), boxes.dtype) scores = tf.reshape( tf.gather(tf.reshape(scores, [-1, 1]), idx), [batch_size, max_output_size]) scores = scores * tf.cast( tf.reshape(tf.range(max_output_size), [1, -1]) < tf.reshape( output_size, [-1, 1]), scores.dtype) return scores, boxes def encode_boxes(boxes, anchors, weights=None): """Encode boxes to targets. Args: boxes: a tensor whose last dimension is 4 representing the coordinates of boxes in ymin, xmin, ymax, xmax order. anchors: a tensor whose shape is the same as `boxes` representing the coordinates of anchors in ymin, xmin, ymax, xmax order. weights: None or a list of four float numbers used to scale coordinates. Returns: encoded_boxes: a tensor whose shape is the same as `boxes` representing the encoded box targets. """ with tf.name_scope('encode_box'): boxes = tf.cast(boxes, dtype=anchors.dtype) y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=-1) # y_min = boxes[..., 0:1] # x_min = boxes[..., 1:2] # y_max = boxes[..., 2:3] # x_max = boxes[..., 3:4] box_h = y_max - y_min + 1.0 box_w = x_max - x_min + 1.0 box_yc = y_min + 0.5 * box_h box_xc = x_min + 0.5 * box_w anchor_ymin, anchor_xmin, anchor_ymax, anchor_xmax = tf.split(anchors, 4, axis=-1) # anchor_ymin = anchors[..., 0:1] # anchor_xmin = anchors[..., 1:2] # anchor_ymax = anchors[..., 2:3] # anchor_xmax = anchors[..., 3:4] anchor_h = anchor_ymax - anchor_ymin + 1.0 anchor_w = anchor_xmax - anchor_xmin + 1.0 anchor_yc = anchor_ymin + 0.5 * anchor_h anchor_xc = anchor_xmin + 0.5 * anchor_w encoded_dy = (box_yc - anchor_yc) / anchor_h encoded_dx = (box_xc - anchor_xc) / anchor_w encoded_dh = tf.math.log(box_h / anchor_h) encoded_dw = tf.math.log(box_w / anchor_w) if weights: encoded_dy *= weights[0] encoded_dx *= weights[1] encoded_dh *= weights[2] encoded_dw *= weights[3] encoded_boxes = tf.concat([encoded_dy, encoded_dx, encoded_dh, encoded_dw], axis=-1) return encoded_boxes def decode_boxes(encoded_boxes, anchors, weights=None): """Decode boxes. Args: encoded_boxes: a tensor whose last dimension is 4 representing the coordinates of encoded boxes in ymin, xmin, ymax, xmax order. anchors: a tensor whose shape is the same as `boxes` representing the coordinates of anchors in ymin, xmin, ymax, xmax order. 
weights: None or a list of four float numbers used to scale coordinates. Returns: encoded_boxes: a tensor whose shape is the same as `boxes` representing the decoded box targets. """ with tf.name_scope('decode_box'): encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype) dy, dx, dh, dw = tf.split(encoded_boxes, 4, axis=-1) # dy = encoded_boxes[..., 0:1] # dx = encoded_boxes[..., 1:2] # dh = encoded_boxes[..., 2:3] # dw = encoded_boxes[..., 3:4] if weights: dy /= weights[0] dx /= weights[1] dh /= weights[2] dw /= weights[3] dh = tf.minimum(dh, BBOX_XFORM_CLIP) dw = tf.minimum(dw, BBOX_XFORM_CLIP) anchor_ymin, anchor_xmin, anchor_ymax, anchor_xmax = tf.split(anchors, 4, axis=-1) # anchor_ymin = anchors[..., 0:1] # anchor_xmin = anchors[..., 1:2] # anchor_ymax = anchors[..., 2:3] # anchor_xmax = anchors[..., 3:4] anchor_h = anchor_ymax - anchor_ymin + 1.0 anchor_w = anchor_xmax - anchor_xmin + 1.0 anchor_yc = anchor_ymin + 0.5 * anchor_h anchor_xc = anchor_xmin + 0.5 * anchor_w decoded_boxes_yc = dy * anchor_h + anchor_yc decoded_boxes_xc = dx * anchor_w + anchor_xc decoded_boxes_h = tf.exp(dh) * anchor_h decoded_boxes_w = tf.exp(dw) * anchor_w decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w decoded_boxes_ymax = decoded_boxes_ymin + decoded_boxes_h - 1.0 decoded_boxes_xmax = decoded_boxes_xmin + decoded_boxes_w - 1.0 decoded_boxes = tf.concat( [decoded_boxes_ymin, decoded_boxes_xmin, decoded_boxes_ymax, decoded_boxes_xmax], axis=-1 ) return decoded_boxes def clip_boxes(boxes, height, width): """Clip boxes. Args: boxes: a tensor whose last dimension is 4 representing the coordinates of boxes in ymin, xmin, ymax, xmax order. height: an integer, a scalar or a tensor such as all but the last dimensions are the same as `boxes`. The last dimension is 1. It represents the height of the image. width: an integer, a scalar or a tensor such as all but the last dimensions are the same as `boxes`. The last dimension is 1. It represents the width of the image. Returns: clipped_boxes: a tensor whose shape is the same as `boxes` representing the clipped boxes. """ with tf.name_scope('clip_box'): y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=-1) # y_min = boxes[..., 0:1] # x_min = boxes[..., 1:2] # y_max = boxes[..., 2:3] # x_max = boxes[..., 3:4] height = tf.cast(height, dtype=boxes.dtype) width = tf.cast(width, dtype=boxes.dtype) clipped_y_min = tf.maximum(tf.minimum(y_min, height - 1.0), 0.0) clipped_y_max = tf.maximum(tf.minimum(y_max, height - 1.0), 0.0) clipped_x_min = tf.maximum(tf.minimum(x_min, width - 1.0), 0.0) clipped_x_max = tf.maximum(tf.minimum(x_max, width - 1.0), 0.0) clipped_boxes = tf.concat([clipped_y_min, clipped_x_min, clipped_y_max, clipped_x_max], axis=-1) return clipped_boxes def filter_boxes(boxes, scores, min_size, height, width, scale): """Filter out boxes that are too small. Args: boxes: a tensor whose last dimension is 4 representing the coordinates of boxes in ymin, xmin, ymax, xmax order. scores: a tensor such as all but the last dimensions are the same as `boxes`. The last dimension is 1. It represents the scores. min_size: an integer specifying the minimal size. height: an integer, a scalar or a tensor such as all but the last dimensions are the same as `boxes`. The last dimension is 1. It represents the height of the image. width: an integer, a scalar or a tensor such as all but the last dimensions are the same as `boxes`. The last dimension is 1. It represents the width of the image. 
scale: an integer, a scalar or a tensor such as all but the last dimensions are the same as `boxes`. The last dimension is 1. It represents the scale of the image. Returns: filtered_boxes: a tensor whose shape is the same as `boxes` representing the filtered boxes. filtered_scores: a tensor whose shape is the same as `scores` representing the filtered scores. """ with tf.name_scope('filter_box'): y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=-1) # y_min = boxes[..., 0:1] # x_min = boxes[..., 1:2] # y_max = boxes[..., 2:3] # x_max = boxes[..., 3:4] h = y_max - y_min + 1.0 w = x_max - x_min + 1.0 yc = y_min + h / 2.0 xc = x_min + w / 2.0 height = tf.cast(height, dtype=boxes.dtype) width = tf.cast(width, dtype=boxes.dtype) scale = tf.cast(scale, dtype=boxes.dtype) min_size = tf.cast(tf.maximum(min_size, 1), dtype=boxes.dtype) size_mask = tf.logical_and( tf.greater_equal(h, min_size * scale), tf.greater_equal(w, min_size * scale) ) center_mask = tf.logical_and(tf.less(yc, height), tf.less(xc, width)) selected_mask = tf.logical_and(size_mask, center_mask) filtered_scores = tf.where(selected_mask, scores, tf.zeros_like(scores)) filtered_boxes = tf.cast(selected_mask, dtype=boxes.dtype) * boxes return filtered_boxes, filtered_scores def to_normalized_coordinates(boxes, height, width): """Converted absolute box coordinates to normalized ones. Args: boxes: a tensor whose last dimension is 4 representing the coordinates of boxes in ymin, xmin, ymax, xmax order. height: an integer, a scalar or a tensor such as all but the last dimensions are the same as `boxes`. The last dimension is 1. It represents the height of the image. width: an integer, a scalar or a tensor such as all but the last dimensions are the same as `boxes`. The last dimension is 1. It represents the width of the image. Returns: normalized_boxes: a tensor whose shape is the same as `boxes` representing the boxes in normalized coordinates. """ with tf.name_scope('normalize_box'): height = tf.cast(height, dtype=boxes.dtype) width = tf.cast(width, dtype=boxes.dtype) y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=-1) y_min = y_min / height x_min = x_min / width y_max = y_max / height x_max = x_max / width # y_min = boxes[..., 0:1] / height # x_min = boxes[..., 1:2] / width # y_max = boxes[..., 2:3] / height # x_max = boxes[..., 3:4] / width normalized_boxes = tf.concat([y_min, x_min, y_max, x_max], axis=-1) return normalized_boxes def to_absolute_coordinates(boxes, height, width): """Converted normalized box coordinates to absolute ones. Args: boxes: a tensor whose last dimension is 4 representing the coordinates of boxes in ymin, xmin, ymax, xmax order. height: an integer, a scalar or a tensor such as all but the last dimensions are the same as `boxes`. The last dimension is 1. It represents the height of the image. width: an integer, a scalar or a tensor such as all but the last dimensions are the same as `boxes`. The last dimension is 1. It represents the width of the image. Returns: absolute_boxes: a tensor whose shape is the same as `boxes` representing the boxes in absolute coordinates. 
""" with tf.name_scope('denormalize_box'): height = tf.cast(height, dtype=boxes.dtype) width = tf.cast(width, dtype=boxes.dtype) y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=-1) y_min = y_min * height x_min = x_min * width y_max = y_max * height x_max = x_max * width # y_min = boxes[..., 0:1] * height # x_min = boxes[..., 1:2] * width # y_max = boxes[..., 2:3] * height # x_max = boxes[..., 3:4] * width absolute_boxes = tf.concat([y_min, x_min, y_max, x_max], axis=-1) return absolute_boxes
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/structures
structures
image_list
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. from __future__ import division import torch class ImageList(object): """ Structure that holds a list of images (of possibly varying sizes) as a single tensor. This works by padding the images to the same size, and storing in a field the original sizes of each image """ def __init__(self, tensors, image_sizes): """ Arguments: tensors (tensor) image_sizes (list[tuple[int, int]]) """ self.tensors = tensors self.image_sizes = image_sizes def to(self, *args, **kwargs): cast_tensor = self.tensors.to(*args, **kwargs) return ImageList(cast_tensor, self.image_sizes) def to_nhwc(self): nhwc_tensor = self.tensors.to(memory_format=torch.channels_last) return ImageList(nhwc_tensor, self.image_sizes) def to_image_list(tensors, size_divisible=0): """ tensors can be an ImageList, a torch.Tensor or an iterable of Tensors. It can't be a numpy array. When tensors is an iterable of Tensors, it pads the Tensors with zeros so that they have the same shape """ if isinstance(tensors, torch.Tensor) and size_divisible > 0: tensors = [tensors] if isinstance(tensors, ImageList): return tensors elif isinstance(tensors, torch.Tensor): # single tensor shape can be inferred assert tensors.dim() == 4 image_sizes = [tensor.shape[-2:] for tensor in tensors] return ImageList(tensors, image_sizes) elif isinstance(tensors, (tuple, list)): max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors])) # TODO Ideally, just remove this and let me model handle arbitrary # input sizs if size_divisible > 0: import math stride = size_divisible max_size = list(max_size) max_size[1] = int(math.ceil(max_size[1] / stride) * stride) max_size[2] = int(math.ceil(max_size[2] / stride) * stride) max_size = tuple(max_size) batch_shape = (len(tensors),) + max_size batched_imgs = tensors[0].new(*batch_shape).zero_() for img, pad_img in zip(tensors, batched_imgs): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) image_sizes = [im.shape[-2:] for im in tensors] return ImageList(batched_imgs, image_sizes) else: raise TypeError("Unsupported type for to_image_list: {}".format(type(tensors)))
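A brief usage sketch for `to_image_list` from the module above, padding two differently sized images into a single batched tensor (the image shapes are illustrative):

```python
import torch

from maskrcnn_benchmark.structures.image_list import to_image_list

# Two CHW images with different spatial sizes.
images = [torch.rand(3, 480, 640), torch.rand(3, 512, 600)]

batch = to_image_list(images, size_divisible=32)
print(batch.tensors.shape)  # torch.Size([2, 3, 512, 640]) - zero-padded to a common, stride-aligned size
print(batch.image_sizes)    # the original (H, W) of each image is preserved
```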
FasterTransformer
FasterTransformer
README
# FasterTransformer ## This repo can now be found here: https://github.com/NVIDIA/FasterTransformer.
Tools/PyTorch/TimeSeriesPredictionPlatform
TimeSeriesPredictionPlatform
README
# Time-Series Prediction Platform 1.1 for PyTorch

Time-series prediction is a common problem in multiple domains for various applications, including retail, industry, smart cities, and financial services. Research in the time-series field is growing exponentially, with hundreds of deep learning time-series forecasting paper submissions to ICML, ECML, ITISE, and multiple journals every year. However, there is currently no common framework for comparing the accuracy and performance of all the models from industry or academia.

## Solution Overview

The Time-Series Prediction Platform (TSPP) enables users to mix and match datasets and models, giving the user complete control over the following settings and allowing side-by-side comparison of results obtained from various solutions. These include:
- Evaluation metrics
- Evaluation datasets
- Prediction horizons
- Prediction sliding window sizes
- Model choice
- Model hyperparameters

### Time-Series Prediction Platform architecture
The platform has the following architecture.

![Time-series Prediction Platform architecture](TSPP_Architecture.png)

In the previous figure, the command line feeds the input to the TSPP launcher, which uses this input to configure the components required to train and test the model.

The platform is designed to support multiple data types for input features, including the observed values of the forecasted time-series, known data supporting the forecasts (for example, day of the week), and static data (for example, user ID). This is summarized in the following figure.

<div align="center">
<img width="70%" src="https://developer.download.nvidia.com/time-series-platform/time_series_data.png" title="Time-series data type">
<p style="text-align:center"><b>Time-series data type</b></p>
<br>
</div>

### Default configuration
The TSPP utilizes the default configurations provided by each model for each accompanying dataset. More information on individual model configurations can be found within the respective model repositories. By default, Temporal Fusion Transformer (TFT) is included within the TSPP.

### Models
- Temporal Fusion Transformer
- XGBoost
- AutoARIMA
- LSTM

### Feature support matrix
This tool supports the following features:

| Feature | Time-Series Prediction Platform
|-----------------------|--------------------------
|[Automatic mixed precision (AMP)](https://pytorch.org/docs/stable/amp.html)| Yes
|[Multi-GPU training with PyTorch DDP](https://pytorch.org/tutorials/intermediate/ddp_tutorial.html) | Yes
|TorchScript, ONNX, and TRT conversion and NVIDIA Triton Deployment | Yes

#### Features
[Automatic mixed precision](https://pytorch.org/docs/stable/amp.html) is a mode of computation for PyTorch models that allows selected operations to use the float16 data type instead of float32, potentially accelerating those operations and reducing total model runtime. More information can be found under the Mixed precision training section.

Multi-GPU training with [PyTorch Distributed Data Parallel](https://pytorch.org/tutorials/intermediate/ddp_tutorial.html) is a mode of computation for PyTorch models that allows operations to be executed across multiple GPUs in parallel to accelerate computation.

**TorchScript, ONNX, and TRT conversion and NVIDIA Triton Deployment** refer to the conversion of a model to the aforementioned formats and the ability to deploy the resulting converted models to an NVIDIA Triton inference server.
More detail about this process and native inference can be found in the Advanced section under the Conversion, Deployment, and Inference subsection.

### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method.
[Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in the NVIDIA Volta architecture, and continuing through the NVIDIA Turing and NVIDIA Ampere architectures, significant training speedups can be achieved by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.

The ability to train deep learning networks with lower precision was introduced in the NVIDIA Pascal architecture and first supported in [CUDA 8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep Learning SDK.

For information about:
- How to train using mixed precision, refer to the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, refer to the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- How to access and use AMP for PyTorch, refer to the [Torch-AMP](https://pytorch.org/docs/stable/amp.html) guide.

#### Enabling mixed precision
Mixed precision can be enabled by specifying `trainer.config.amp=True` in the launch call; a complete launch command is sketched right after the Requirements list below. In some cases, when the batch size is small, the overhead of scheduling kernels for mixed precision can be larger than the performance gain from using lower precision, effectively resulting in lower throughput.

## Setup
The following section lists the requirements you need to meet to run the Time-Series Prediction Platform.

### Requirements
This repository contains a Dockerfile that extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Ampere Architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU
- Ubuntu 20.04
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- Custom Docker containers built for this tool. Refer to the steps in the [Quick Start Guide](#quick-start-guide).
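As a concrete illustration of the `trainer.config.amp=True` flag described in the Enabling mixed precision subsection above, here is a minimal launch sketch; the model, dataset, and criterion choices are simply the quick-start defaults used elsewhere in this guide:

```bash
python launch_training.py model=tft dataset=electricity trainer/criterion=quantile trainer.config.amp=True
```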
For more information about how to get started with NGC containers, refer to the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: - [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) - [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry) For those unable to set up the required environment or create your own container, refer to the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). ## Quick start guide ### Getting Started 1. Create a dataset directory. The directory can be arbitrary, and it is recommended not to include it in the TimeSeriesPredictionPlatform directory. This arbitrary directory will be mounted to the TSPP container later. In the following steps, this directory will be referred to as /your/datasets/. 2. Enter the Deep Learning Examples TSPP repository: ``` cd DeeplearningExamples/Tools/TimeSeriesPredictionPlatform ``` 3. Copy the relevant temporal fusion transformer code to the TSPP: ``` mkdir -p models/tft_pyt/ && cp ../../PyTorch/Forecasting/TFT/modeling.py models/tft_pyt/ ``` 4. Build the docker image: ``` docker build -t tspp . ``` 5. Next, we will start our container and mount the dataset directory, which means that /workspace/datasets/ points to /your/datasets/. Any changes made to this folder in the docker container are reflected in the original directory and vice versa. If we want to mount additional folders, we can add ‘-v /path/on/local/:/path/in/container/’ to the run command. This will be useful if we want to save the outputs from training or inference once we close the container. To start the docker container: ``` docker run -it --gpus all --ipc=host --network=host -v /your/datasets/:/workspace/datasets/ tspp bash ``` 6. After running the previous command, you will be placed inside the docker container in the /workspace directory. Inside the container, download either the `electricity` or `traffic` dataset: ``` python data/script_download_data.py --dataset {dataset_name} --output_dir /workspace/datasets/ ``` The raw electricity dataset is the 15-minute electricity consumption of 370 customers from the UCI Electricity Load Diagrams. We aggregate to an hourly forecast and use the previous week to predict the following day. The raw traffic dataset is the 10-minute occupancy rate of San Francisco freeways from 440 sensors downloaded from the UCI PEMS-SF Data Set. We again aggregate to an hourly forecast and use the previous week to predict the following day. 7. Preprocess the dataset: ``` python launch_preproc.py dataset={dataset_name} ``` 8. Launch the training, validation, and testing process using the temporal fusion transformer model: ``` python launch_training.py model=tft dataset={dataset_name} trainer/criterion=quantile ``` Outputs are stored in /workspace/outputs/{date}/{time} ### Adding a new dataset The TSPP has been designed to work with most CSV sources. To add an arbitrary dataset to the TSPP: 1. Enter the Deep Learning Examples TSPP repository: ``` cd DeeplearningExamples/Tools/TimeSeriesPredictionPlatform ``` 2. Do a preliminary data transposition. TSPP `launch_preproc.py` script is designed to work with CSV input. Each row should contain only a single datapoint. 
CSV should contain at least three columns: one for time feature, one for labels, and one for dataset ID (we assume a single file will contain data for multiple correlated time series). For reference, see `data/script_download_data.py` script. 3. Include the target dataset in the directory where you want to keep your datasets. The directory can be arbitrary, and it is recommended not to include it in the TimeSeriesPredictionPlatform directory. This arbitrary directory will be mounted to the TSPP container later 4. Create a configuration file for your dataset, found in TimeSeriesPredictionPlatform/conf/dataset, that includes the following values: * source_path: The path to the CSV that contains your dataset * dest_path: The path to where preprocessing should write your preprocessed dataset * time_ids: The name of the column within your source CSV that is the feature to split your training, validation, and test datasets on. * train_range, valid_range, test_range: The ranges that mark the edges of the train, validation, and test subsets. Remember that subsets can overlap since predicting the first ‘unseen element’ requires the input of the seen elements before it. As an alternative, a valid_boundary can be specified, which marks the end of training. Then from the valid boundary, the next horizon length number of entries are for validation, and finally, following the end of the validation set, the next horizon length number of entries are for testing. * dataset_stride: The stride the dataloader uses to walk the sliding window through the dataset. Default: 1 * scale_per_id: Whether to scale continuous features during preprocessing using scalers fitted on just samples from the same ID (True), or all samples (False, Default) * encoder_length: The length of data known up until the ‘present’ * example_length: The length of all data, including data known into the future. The prediction horizon is the difference between example_length and encoder_length. * features: A list of the features that the model takes as input. Each feature should be represented by an object containing descriptive attributes. All features should have at least a feature_type (ID, TIME, TARGET, WEIGHT, SAMPLE_WEIGHT, KNOWN, OBSERVED, or STATIC) and feature_embed_type (CONTINUOUS or CATEGORICAL). Continuous features may have a scaler attribute that represents the type of scaler used in preprocessing. Categorical columns should have a cardinality attribute that represents the number of unique values the feature takes plus one (this is due to mapping NaNs to 0 in all cases). Examples can be found in the files in /TimeSeriesPredictionPlatform/conf/dataset/. Required features are one TIME feature, at least one ID feature, one TARGET feature, and at least one KNOWN, OBSERVED, or STATIC feature. * train_samples: The number of samples that should be taken at train time to use as train input to your model for a single epoch * valid_samples: The number of samples that should be taken at train time to use as validation input to your model for a single epoch * binarized: Whether or not preprocessing should accelerate data loading by outputting the preprocessed dataset in a binarized format * time_series_count: The number of unique time-series contained in the dataset. 5. After a specification has been written, it is ready to be preprocessed with: ``` docker build -t tspp . 
docker run -it --gpus all -v /your/datasets/:/workspace/datasets/ --ipc=host tspp bash
python launch_preproc.py dataset={dataset_name}
```
For some models, additional parameters are required per dataset. As mentioned in the Adding a new model section, there are examples of these model-dataset combination files in `TimeSeriesPredictionPlatform/conf/model_dataset/`. An example would be model A requiring a specific hidden size when used on dataset B. In this case, `TimeSeriesPredictionPlatform/conf/model_dataset/A_B.yaml` should contain the desired hidden size under `model.config.hidden_size`.

6. Test your dataset by training and evaluating a Temporal Fusion Transformer. Training, validation, and testing are all included by default using the `launch_training.py` command shown below:
```
docker run -it --gpus all -v /your/datasets/:/workspace/datasets/ --ipc=host tspp bash
python launch_training.py dataset={YOUR_DATASET} model=tft trainer/criterion=quantile
```

### Adding a new model
Models added to the prediction platform are subject to a few key constraints. Namely, the models should be constructed using vanilla PyTorch. Models should handle the forecasting task (anomaly detection and classification are planned); models should expect that the data is fed in a sliding window and that tensors are aggregated by temporal/data type. An example of this can be found in `data/dataset.py`.

The default format of a data batch is a dictionary with tensors representing different kinds of covariates. A complete list of the tensor keys that can be found in a batch is:
```
FEAT_NAMES = ["s_cat", "s_cont", "k_cat", "k_cont", "o_cat", "o_cont", "target", "weight", "sample_weight", "id"]
```
To integrate a model into the TSPP:
1. Enter the Deep Learning Examples repository:
```
cd DeeplearningExamples
```
2. Copy the model files into the `DeeplearningExamples/Tools/PyTorch/TimeSeriesPredictionPlatform/models/` directory:
```
cp -r /PATH/TO/YOUR/MODEL Tools/PyTorch/TimeSeriesPredictionPlatform/models
```
3. Write a configuration file for the model in `DeeplearningExamples/Tools/TimeSeriesPredictionPlatform/conf/model`. This configuration file should reflect the default configuration for your model. Within this file, the _target_ of the model component should be set to point to your model class. If your model needs additional configuration values based on the dataset, you should create a configuration file in `DeeplearningExamples/Tools/TimeSeriesPredictionPlatform/conf/model_dataset/{modelname_datasetname.yaml}` named according to the model and dataset names. Examples can be found in the `DeeplearningExamples/Tools/TimeSeriesPredictionPlatform/conf/model/tft.yaml` and `DeeplearningExamples/Tools/TimeSeriesPredictionPlatform/conf/model_dataset/tft_traffic.yaml` files.
4. Build and launch the container:
```
cd DeeplearningExamples/Tools/
docker build -t tspp TimeSeriesPredictionPlatform
docker run -it --rm --ipc=host --network=host --gpus all -v /your/datasets/:/workspace/datasets/ tspp bash
```
5. Verify that the model can be run within the TSPP:
```
python launch_training.py model={model_name}
```
Some additional values may be needed in this call. For example, if your model requires the Gaussian NLL criterion, you will need to append `trainer/criterion=GLL` to your call.

## Advanced
The following sections provide more details about changing the dataset, altering the data preprocessing, and comparing the training results.
## Advanced

The following sections provide more details about changing the dataset, altering the data preprocessing, and comparing the training results.

### Running multi-GPU experiments
Launching on multi-GPU requires no changes to model code and can be executed as follows within a TSPP container:
```
python launch_training.py -m hydra/launcher=torchrun hydra.launcher.nproc_per_node={num_gpus} {override parameters}
```
Statistical models (like AutoARIMA) are not run on GPU, so they are unsuitable for multi-GPU acceleration. In addition, XGBoost has a separate way of doing multi-GPU acceleration.

### Parallel training
While doing seed sweeps or hyperparameter searches on a machine with more than one GPU, we can parallelize the workload by using the `joblib` hydra plugin. To use the plugin, one has to specify `hydra/launcher=joblib` together with the number of parallel jobs `hydra.launcher.n_jobs=8`. For example:
```bash
python launch_training.py \
    -m \
    seed='range(1,17)' \
    model=tft \
    dataset=electricity \
    trainer/criterion=quantile \
    trainer.config.num_epochs=3 \
    hydra/launcher=joblib \
    hydra.launcher.n_jobs=8 \
    hydra.sweeper.max_batch_size=8
```
*Warning*: The sweeper sends jobs to a launcher in batches. In order to avoid race conditions, set the sweeper batch size to exactly match the number of parallel jobs. For the default sweeper this is `hydra.sweeper.max_batch_size=8`, and for the Optuna sweeper `hydra.sweeper.n_jobs=8`.

### Running experiments with Exponential Moving Averaging
Exponential moving averaging (EMA) is a technique in which, while training, the model weights are integrated into a weighted moving average, and the weighted moving average is used in lieu of the directly trained model weights at test time. Our experiments have found that this technique improves the convergence properties of most models and datasets we work with. A full description of the technique can be found in https://arxiv.org/pdf/1803.05407.pdf.
To activate EMA in the TSPP, specify `trainer.config.ema=True` in the command line call at runtime. The decay parameter of the moving average can be modified using `+trainer.config.ema_decay={decay}`.

### Running experiments with Curriculum Learning
To use curriculum learning in your training, specify the `trainer.config.cl_start_horizon` and `trainer.config.cl_update` config fields. [More on CL](https://dl.acm.org/doi/pdf/10.1145/1553374.1553380)

### Hyperparameter Search
Hyperparameter searches can be used to find close-to-optimal hyperparameter configurations for a given model or dataset. In the TSPP, hyperparameter searches are driven by Optuna. To launch a hyperparameter search, use:
```bash
python launch_training.py -m hydra/sweeper=optuna hydra.sweeper.n_trials={N} {parameter_ranges}
```
For more information on how to properly set up `{parameter_ranges}`, visit the [Hydra docs](https://hydra.cc/docs/plugins/optuna_sweeper/#search-space-configuration).

### XGBoost Training
XGBoost and RAPIDS packages are now automatically present in the base NGC PyTorch containers. The TSPP is able to leverage this and allows users to perform training, inference, and deployment on XGBoost and Dask XGBoost using the same commands as neural network models. To train:
```bash
python launch_training.py model={xgboost, dask_xgboost} dataset={dataset}
```
Note: All stages of XGBoost are run on GPU. CPU training is currently not supported.
This launches training using CSV files from the output of preprocessing. Validation data is automatically used for early stopping if applicable. The TSPP trains a separate XGBoost model for each step in the horizon. If some arbitrary row in the dataframe is at time `t`, then the i-th model is trained to predict timestep `t+i`.
As part of this, we give the model access to all the features at time step `t` and additionally bring in the static and known features at timestep `t+i`. Each ID is handled separately, so for any given training/prediction sample, there is only data from one ID.
XGBoost itself cannot create new features or process features in the same way as neural networks. To this end, we have created a framework where one can specify lag_features and moving_average_features. Lag_features give the XGBoost model access to the values of a given feature in the past, while moving_average_features give the model access to the moving average of a given feature over some number of previous time steps. For an example of how to specify these features, take a look at conf/model_dataset/xgboost_electricity.yaml; an illustrative sketch of what these derived features represent is included at the end of this section. To specify a lag_feature, one needs to select a feature, a min value, and a max value. The TSPP then automatically adds the values of that feature at timesteps `t-min_value` to `t-max_value`. Instead of specifying min and max, one can also specify value, which is a list of values for finer control. Note that the values must be natural numbers greater than 0. To specify a moving_average_feature, one needs to select a feature and a window_size. This window_size indicates that a new feature will be added that is the average of the values of the feature from `t-window_size` to `t`.
For model parameters, the standard XGBoost parameters can be passed using `model.config.{parameter}`; some may require `+model.config.{parameter}` if the parameter is not already set inside the conf/ directory. In addition, one can specify the number of boosting rounds using `model.config.n_rounds`. There are a few additional parameters used exclusively by Dask XGBoost to initialize the LocalCUDACluster: `model.config.cluster.world_size`, which sets the number of GPUs to use; `model.config.cluster.device_pool_frac`, which sets the amount of memory to allocate on the GPUs; `model.config.cluster.protocol`, which sets the protocol to use on the cluster; and `model.config.cluster.npartitions`, which sets the number of partitions to use when converting to Dask-cuDF. Finally, `trainer.callbacks.early_stopping.patience` can be used to set the early stopping patience of the XGBoost rounds, and `trainer.config.log_interval` can be used to set the logging frequency for XGBoost.
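To make the meaning of these derived features concrete, below is a rough pandas sketch of what they correspond to. The TSPP computes them internally from the YAML specification; the column name, the example values, and the exact window convention used here are illustrative assumptions, not TSPP code.
```python
# Rough pandas sketch of lag and moving-average features (illustrative only).
import pandas as pd

df = pd.DataFrame({
    "id": ["a"] * 6,
    "power_usage": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
})
g = df.groupby("id")["power_usage"]

# lag_features with min_value=1, max_value=2 -> values at t-1 and t-2
df["power_usage_lag_1"] = g.shift(1)
df["power_usage_lag_2"] = g.shift(2)

# moving_average_features with window_size=3, here interpreted as a
# trailing window of 3 steps ending at t
df["power_usage_ma_3"] = g.transform(lambda s: s.rolling(window=3).mean())
```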
### Conversion, Deployment, and Inference

Inference takes place after a model has been trained and one wants to run data through it. Since this only entails using a forward function, the model can be optimized and converted to many different formats that can perform the forward pass more efficiently. In addition, one can set up an [NVIDIA Triton Inference Server](https://github.com/triton-inference-server/server), which allows a continuous stream of data to be presented to and passed through the model. The server provides an inference service via an HTTP or gRPC endpoint at ports 8000 and 8001, respectively, on the "bridge" docker network.
The TSPP supports a few versions of inference, including native inference and NVIDIA Triton deployment. Both use the test_forward function specified in the model config (defaults to forward()) as the forward function.
To launch native inference, one must have a checkpoint directory from a TSPP training call that includes a .hydra directory and a best_checkpoint.zip from training a neural network, a populated checkpoints directory from training an XGBoost model, or an arima.pkl file from training an ARIMA model. Then run
```
python launch_inference.py checkpoint=/path/to/checkpoint/directory
```
Note: Do not confuse the checkpoint directory with the TimeSeriesPredictionPlatform/outputs/ directory. The directory to use in the inference call is typically two levels lower (for example, /path/to/TimeSeriesPredictionPlatform/outputs/2021-08-23/03-03-11/). The device argument refers to the device that one would like the model to be built and run on. Note that multi-GPU inference launches are not supported.
By default, the evaluator uses the configs specified in the .hydra/config.yaml file from the checkpoint directory. One can override these by including them in the launch. For example, to adjust the metrics to use only MAE and RMSE:
```
python launch_inference.py checkpoint=/path/to/checkpoint/directory "+inference.config.evaluator.config.metrics=['MAE', 'RMSE']"
```
Note: Be sure to include the + when overriding any of the evaluator configs.
Prior to the next section, make sure that the TSPP container is run with the following arguments from the TSPP directory. We recommend creating an outputs_dir that can be used to mount the outputs directory and the multirun folder from multi-GPU runs.
```
docker run -it --rm --gpus all --ipc=host --network=host -v /your/datasets/:/workspace/datasets/ -v /your/outputs_dir/:/your/outputs_dir/ -v $(pwd):$(pwd) -v /your/outputs_dir/outputs/:/workspace/outputs/ -v /your/outputs_dir/multirun/:/workspace/multirun/ -v /var/run/docker.sock:/var/run/docker.sock tspp
```
Note that `/your/outputs_dir/{outputs,multirun}` denotes the joined path, equivalent to `os.path.join("/your/outputs_dir/", "outputs")` (and likewise for `multirun`).
In the previous command, note that six different directories are mounted. The datasets are mounted to the usual location, but we have two different mount locations for outputs. Mounting the outputs to /workspace/outputs/ allows usual training calls to be saved in your output directory. Similarly, mounting the multirun to /workspace/multirun/ allows multi-GPU runs to be saved. The second output mount uses the same path inside the container as the output directory has on the host. This is essential due to the way we deploy to NVIDIA Triton: the directory of the output in the container must match the directory of the output on the host machine. Additionally, the mount for /var/run/docker.sock allows the tspp docker container to launch another container, in our case the NVIDIA Triton server.
In subsequent calls to launch_triton_configure.py, the /path/to/checkpoint/directory/ must be of the form /your/outputs_dir/{checkpoint_dir} instead of /workspace/{checkpoint_dir} and should be an absolute path. Remember that multi-GPU runs are stored in `multirun` instead of `outputs`.
The simplest way to use deployment is to use the `multirun` and `outputs` directories directly inside the TSPP. This can be achieved by launching the container as follows:
```
docker run -it --rm --gpus all --ipc=host --network=host -v /your/datasets/:/workspace/datasets/ -v $(pwd)/multirun:/workspace/multirun -v $(pwd)/outputs:/workspace/outputs -v $(pwd):$(pwd) -v /var/run/docker.sock:/var/run/docker.sock tspp
```
Finally, note that to run the deployment script, you must be in the same directory path in the container as the TSPP is stored at on your machine. This means that being in /workspace in the container may not work for running the deployment.
If outside the container your TimeSeriesPredictionPlatform is at /home/user/TimeSeriesPredictionPlatform, you must be at the same path in your docker container (/home/user/TimeSeriesPredictionPlatform). This is the purpose of the `-v $(pwd):$(pwd)` in the run script.
To launch conversion and deployment, one must again have a checkpoint directory from a TSPP training call that includes a .hydra directory and a best_checkpoint.zip from a neural network training, or a populated checkpoints directory from an XGBoost training. Statistical models, such as ARIMA, are not supported for deployment. In addition, the model that will be converted must already support conversion to the required format. In the current version of the TSPP, we first export the model to either TorchScript-Script or TorchScript-Trace and subsequently convert it to TorchScript, Onnx, or TRT using the model-navigator package. We also support export to Onnx and conversion to both Onnx and TRT. For XGBoost models, we format the checkpoints and deploy using the FIL backend; there are no extra steps necessary.
To run export and conversion (for XGBoost, the deployment/export and deployment/convert fields can be ignored, and no other deployment options are functional):
```
python launch_triton_configure.py deployment/export={ts-trace, ts-script, onnx} deployment/convert={torchscript, onnx, trt} checkpoint=/path/to/checkpoint/directory
```
The format mapping is listed below:
* TorchScript-Script: ts-script
* TorchScript-Trace: ts-trace
* TorchScript: torchscript
* Onnx: onnx
* TRT: trt

Note that some conversions do not support the apex FusedLayerNorm library. To get around this, we set the operating system environment variable `TFT_SCRIPTING` to True when loading the model for deployment. This changes the apex LayerNorm to the vanilla torch LayerNorm. In addition, one can select the batch size and precision of the conversion using +inference.config.evaluator.config.batch_size and inference.config.precision=Choice[ fp32, fp16 ], respectively.
Once export and conversion have been done, the results are stored in /path/to/checkpoint/directory/deployment. Subsequently, the converted model's NVIDIA Triton config is generated in the /path/to/checkpoint/directory/deployment/navigator_workspace/model-store/ directory.
An additional option in running conversion is selecting whether to run only the basics of conversion and NVIDIA Triton config creation, or to run the full pipeline of conversion, NVIDIA Triton config creation, profiling, analysis, and helm chart creation. Setting config.inference.optimize=True during launch switches to the full pipeline. Another part of optimization is setting the backend accelerator for NVIDIA Triton config generation. Setting config.inference.accelerator=Choice[none, trt] changes the accelerator specified. Note that this defaults to `none`, and `trt` is only compatible with the Onnx conversion.
If one wants to launch the NVIDIA Triton inference server using a specific GPU, the CUDA index can be specified with the config option config.inference.gpu, which defaults to 0.
More information on the conversion is located here: https://github.com/triton-inference-server/model_navigator/blob/v0.2.7/docs/conversion.md
More information on the NVIDIA Triton config creation is located here: https://github.com/triton-inference-server/model_navigator/blob/v0.2.7/docs/triton_model_configurator.md
More information on the full pipeline is located here: https://github.com/triton-inference-server/model_navigator/blob/v0.2.7/docs/run.md
After running `launch_triton_configure.py`, the directories are set up for quick Triton deployment. To start the server:
```
python launch_inference_server.py checkpoint=/path/to/checkpoint/directory
```
Once the script finishes running, the Triton server will run in the background, waiting for inputs until it is closed. To run inference on the test dataset that the checkpoint was trained on:
```
python launch_inference.py inference=triton checkpoint=/path/to/checkpoint/directory
```
Similar to native inference, one can again override the evaluator configs. The NVIDIA Triton model name is taken from the second component of the model path. For example, in the case of our TFT model, whose path is models.tft_pyt.TemporalFusionTransformer, the name of the NVIDIA Triton model is tft_pyt. In the case of XGBoost, there is a different model name for each step in the horizon, specified as `xgb_{i}`.
There is a config option +inference.config.model_name, which can be set to the NVIDIA Triton model name. This does not set the name of the model but instead selects which of the possible models in the model-store directory will be used for inference. This is useful after a call using the optimize option, which can generate multiple different models in the model-store.
For both the native and Triton launch_inference, one can specify which dataset and target_scalers to use (if any), as long as the data shapes do not conflict with the already trained model. To specify a dataset directory, use +inference.config.dataset_dir=/path/to/dataset. The dataset directory must contain a tspp_preprocess.bin file as well as either train.bin/valid.bin/test.bin or train.csv/valid.csv/test.csv, depending on the configuration option dataset.config.binarized (this option cannot be changed during deployment or inference). Once the path has been set, deployment and inference both use the test dataset.

#### Online Inference

The TSPP also supports an online inference solution for both XGBoost models and neural models. Given raw data (not preprocessed by TSPP), both native and NVIDIA Triton inference can preprocess and pass the data through the models. When running, specify `+inference.config.dataset_path=/path/to/raw/data/csv` and, if applicable, `+inference.config.preproc_state_path=/path/to/tspp_preprocess.bin` (if the preprocess state is saved elsewhere). Note that this is not yet supported for ARIMA models.
As a final note, make sure to close the NVIDIA Triton Inference Server docker container when finished, using `docker stop trt_server_cont`.
Our TFT model supports export to TorchScript-Trace and conversion to all formats.
If you encounter an error such as
```
RuntimeError: Model tft_pyt:1 is not ready
```
or
```
ERROR root Exception in callback <function InferenceServerClient.async_infer.<locals>.wrapped_callback at 0x7f9437b469d0>: AttributeError("'InferenceServerException' object has no attribute 'get_response'")
```
there are a few possible reasons for this to come up. First, make sure that when the TSPP docker container was launched, the network argument was set to host.
Second, ensure the correct initial path is used, that is, something of the form /home/user/TimeSeriesPredictionPlatform instead of /workspace. Next, one can run `docker ps`; if the container `trt_server_cont` shows up, close it using `docker stop trt_server_cont`. After this, one should try rerunning the command. If neither of these steps is applicable or the problem persists, it is a more specific issue that requires more debugging.

### Parameters

The config structure reflects the internal design of the tool. Most components have their config stored in
```
/workspace/conf/{component_type}/{component_name}.yaml
```
with a few exceptions where components are strictly dependent (for example, an optimizer can be used only during training, so its configuration is stored in `/workspace/conf/trainer/optimizer/{optimizer_name}.yaml`).
If a parameter does not exist in the config, you must prepend `+` to its reference in the command line call. For example, `+trainer.config.force_rerun=...` adds force_rerun to trainer.config, while `trainer.config.force_rerun=...` raises an error.

## Release Notes

We're constantly refining and improving our performance on AI and HPC workloads with frequent updates to our software stack. For our latest performance data, refer to these pages for [AI](https://developer.nvidia.com/deep-learning-performance-training-inference) and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.

### Changelog

November 2021
- Initial release

July 2022
- Reworked config structure
- Added parallel execution
- Fixed race condition when using torch distributed
- Switched to the Optuna plugin instead of having custom code
- Added basic suspend-resume utility
- Added curriculum learning option
- Weights are allowed for arbitrary loss functions
- Removed visualization (will be added in a future release)
- Added XGBoost model
- Added multi-ID dataset for models like Informer
- Added example scripts
- Criterions and optimizers no longer require dummy wrappers

### Known issues

If you encounter errors stating `srcIndex < value`, verify that your categorical cardinalities are the correct size; this error indicates that the value of a categorical feature you are trying to embed is too large for its respective embedding table.
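A quick, hedged way to sanity-check the cardinalities declared in your dataset configuration against the raw data is sketched below. This helper is not part of the TSPP, and the file path and column/cardinality pairs are placeholders; it simply applies the rule stated above (cardinality = number of unique values plus one, since NaNs map to 0).
```python
# Hedged helper (not part of the TSPP) for sanity-checking categorical cardinalities.
import pandas as pd

df = pd.read_csv("/workspace/datasets/my_dataset/raw.csv")  # placeholder path

for column, configured_cardinality in {"categorical_id": 370}.items():  # placeholder values
    expected = df[column].nunique(dropna=True) + 1
    if configured_cardinality < expected:
        print(f"{column}: configured cardinality {configured_cardinality} < expected {expected}")
```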
PyTorch/SpeechSynthesis/Tacotron2/filelists
filelists
ljs_audio_text_val_filelist
LJSpeech-1.1/wavs/LJ022-0023.wav|The overwhelming majority of people in this country know how to sift the wheat from the chaff in what they hear and what they read. LJSpeech-1.1/wavs/LJ043-0030.wav|If somebody did that to me, a lousy trick like that, to take my wife away, and all the furniture, I would be mad as hell, too. LJSpeech-1.1/wavs/LJ005-0201.wav|as is shown by the report of the Commissioners to inquire into the state of the municipal corporations in eighteen thirty-five. LJSpeech-1.1/wavs/LJ001-0110.wav|Even the Caslon type when enlarged shows great shortcomings in this respect: LJSpeech-1.1/wavs/LJ003-0345.wav|All the committee could do in this respect was to throw the responsibility on others. LJSpeech-1.1/wavs/LJ007-0154.wav|These pungent and well-grounded strictures applied with still greater force to the unconvicted prisoner, the man who came to the prison innocent, and still uncontaminated, LJSpeech-1.1/wavs/LJ018-0098.wav|and recognized as one of the frequenters of the bogus law-stationers. His arrest led to that of others. LJSpeech-1.1/wavs/LJ047-0044.wav|Oswald was, however, willing to discuss his contacts with Soviet authorities. He denied having any involvement with Soviet intelligence agencies LJSpeech-1.1/wavs/LJ031-0038.wav|The first physician to see the President at Parkland Hospital was Dr. Charles J. Carrico, a resident in general surgery. LJSpeech-1.1/wavs/LJ048-0194.wav|during the morning of November twenty-two prior to the motorcade. LJSpeech-1.1/wavs/LJ049-0026.wav|On occasion the Secret Service has been permitted to have an agent riding in the passenger compartment with the President. LJSpeech-1.1/wavs/LJ004-0152.wav|although at Mr. Buxton's visit a new jail was in process of erection, the first step towards reform since Howard's visitation in seventeen seventy-four. LJSpeech-1.1/wavs/LJ008-0278.wav|or theirs might be one of many, and it might be considered necessary to "make an example." LJSpeech-1.1/wavs/LJ043-0002.wav|The Warren Commission Report. By The President's Commission on the Assassination of President Kennedy. Chapter seven. Lee Harvey Oswald: LJSpeech-1.1/wavs/LJ009-0114.wav|Mr. Wakefield winds up his graphic but somewhat sensational account by describing another religious service, which may appropriately be inserted here. LJSpeech-1.1/wavs/LJ028-0506.wav|A modern artist would have difficulty in doing such accurate work. LJSpeech-1.1/wavs/LJ050-0168.wav|with the particular purposes of the agency involved. The Commission recognizes that this is a controversial area LJSpeech-1.1/wavs/LJ039-0223.wav|Oswald's Marine training in marksmanship, his other rifle experience and his established familiarity with this particular weapon LJSpeech-1.1/wavs/LJ029-0032.wav|According to O'Donnell, quote, we had a motorcade wherever we went, end quote. LJSpeech-1.1/wavs/LJ031-0070.wav|Dr. Clark, who most closely observed the head wound, LJSpeech-1.1/wavs/LJ034-0198.wav|Euins, who was on the southwest corner of Elm and Houston Streets testified that he could not describe the man he saw in the window. LJSpeech-1.1/wavs/LJ026-0068.wav|Energy enters the plant, to a small extent, LJSpeech-1.1/wavs/LJ039-0075.wav|once you know that you must put the crosshairs on the target and that is all that is necessary. 
LJSpeech-1.1/wavs/LJ004-0096.wav|the fatal consequences whereof might be prevented if the justices of the peace were duly authorized LJSpeech-1.1/wavs/LJ005-0014.wav|Speaking on a debate on prison matters, he declared that LJSpeech-1.1/wavs/LJ012-0161.wav|he was reported to have fallen away to a shadow. LJSpeech-1.1/wavs/LJ018-0239.wav|His disappearance gave color and substance to evil reports already in circulation that the will and conveyance above referred to LJSpeech-1.1/wavs/LJ019-0257.wav|Here the tread-wheel was in use, there cellular cranks, or hard-labor machines. LJSpeech-1.1/wavs/LJ028-0008.wav|you tap gently with your heel upon the shoulder of the dromedary to urge her on. LJSpeech-1.1/wavs/LJ024-0083.wav|This plan of mine is no attack on the Court; LJSpeech-1.1/wavs/LJ042-0129.wav|No night clubs or bowling alleys, no places of recreation except the trade union dances. I have had enough. LJSpeech-1.1/wavs/LJ036-0103.wav|The police asked him whether he could pick out his passenger from the lineup. LJSpeech-1.1/wavs/LJ046-0058.wav|During his Presidency, Franklin D. Roosevelt made almost four hundred journeys and traveled more than three hundred fifty thousand miles. LJSpeech-1.1/wavs/LJ014-0076.wav|He was seen afterwards smoking and talking with his hosts in their back parlor, and never seen again alive. LJSpeech-1.1/wavs/LJ002-0043.wav|long narrow rooms -- one thirty-six feet, six twenty-three feet, and the eighth eighteen, LJSpeech-1.1/wavs/LJ009-0076.wav|We come to the sermon. LJSpeech-1.1/wavs/LJ017-0131.wav|even when the high sheriff had told him there was no possibility of a reprieve, and within a few hours of execution. LJSpeech-1.1/wavs/LJ046-0184.wav|but there is a system for the immediate notification of the Secret Service by the confining institution when a subject is released or escapes. LJSpeech-1.1/wavs/LJ014-0263.wav|When other pleasures palled he took a theatre, and posed as a munificent patron of the dramatic art. LJSpeech-1.1/wavs/LJ042-0096.wav|(old exchange rate) in addition to his factory salary of approximately equal amount LJSpeech-1.1/wavs/LJ049-0050.wav|Hill had both feet on the car and was climbing aboard to assist President and Mrs. Kennedy. LJSpeech-1.1/wavs/LJ019-0186.wav|seeing that since the establishment of the Central Criminal Court, Newgate received prisoners for trial from several counties, LJSpeech-1.1/wavs/LJ028-0307.wav|then let twenty days pass, and at the end of that time station near the Chaldasan gates a body of four thousand. LJSpeech-1.1/wavs/LJ012-0235.wav|While they were in a state of insensibility the murder was committed. LJSpeech-1.1/wavs/LJ034-0053.wav|reached the same conclusion as Latona that the prints found on the cartons were those of Lee Harvey Oswald. LJSpeech-1.1/wavs/LJ014-0030.wav|These were damnatory facts which well supported the prosecution. LJSpeech-1.1/wavs/LJ015-0203.wav|but were the precautions too minute, the vigilance too close to be eluded or overcome? LJSpeech-1.1/wavs/LJ028-0093.wav|but his scribe wrote it in the manner customary for the scribes of those days to write of their royal masters. LJSpeech-1.1/wavs/LJ002-0018.wav|The inadequacy of the jail was noticed and reported upon again and again by the grand juries of the city of London, LJSpeech-1.1/wavs/LJ028-0275.wav|At last, in the twentieth month, LJSpeech-1.1/wavs/LJ012-0042.wav|which he kept concealed in a hiding-place with a trap-door just under his bed. 
LJSpeech-1.1/wavs/LJ011-0096.wav|He married a lady also belonging to the Society of Friends, who brought him a large fortune, which, and his own money, he put into a city firm, LJSpeech-1.1/wavs/LJ036-0077.wav|Roger D. Craig, a deputy sheriff of Dallas County, LJSpeech-1.1/wavs/LJ016-0318.wav|Other officials, great lawyers, governors of prisons, and chaplains supported this view. LJSpeech-1.1/wavs/LJ013-0164.wav|who came from his room ready dressed, a suspicious circumstance, as he was always late in the morning. LJSpeech-1.1/wavs/LJ027-0141.wav|is closely reproduced in the life-history of existing deer. Or, in other words, LJSpeech-1.1/wavs/LJ028-0335.wav|accordingly they committed to him the command of their whole army, and put the keys of their city into his hands. LJSpeech-1.1/wavs/LJ031-0202.wav|Mrs. Kennedy chose the hospital in Bethesda for the autopsy because the President had served in the Navy. LJSpeech-1.1/wavs/LJ021-0145.wav|From those willing to join in establishing this hoped-for period of peace, LJSpeech-1.1/wavs/LJ016-0288.wav|"Müller, Müller, He's the man," till a diversion was created by the appearance of the gallows, which was received with continuous yells. LJSpeech-1.1/wavs/LJ028-0081.wav|Years later, when the archaeologists could readily distinguish the false from the true, LJSpeech-1.1/wavs/LJ018-0081.wav|his defense being that he had intended to commit suicide, but that, on the appearance of this officer who had wronged him, LJSpeech-1.1/wavs/LJ021-0066.wav|together with a great increase in the payrolls, there has come a substantial rise in the total of industrial profits LJSpeech-1.1/wavs/LJ009-0238.wav|After this the sheriffs sent for another rope, but the spectators interfered, and the man was carried back to jail. LJSpeech-1.1/wavs/LJ005-0079.wav|and improve the morals of the prisoners, and shall insure the proper measure of punishment to convicted offenders. LJSpeech-1.1/wavs/LJ035-0019.wav|drove to the northwest corner of Elm and Houston, and parked approximately ten feet from the traffic signal. LJSpeech-1.1/wavs/LJ036-0174.wav|This is the approximate time he entered the roominghouse, according to Earlene Roberts, the housekeeper there. LJSpeech-1.1/wavs/LJ046-0146.wav|The criteria in effect prior to November twenty-two, nineteen sixty-three, for determining whether to accept material for the PRS general files LJSpeech-1.1/wavs/LJ017-0044.wav|and the deepest anxiety was felt that the crime, if crime there had been, should be brought home to its perpetrator. LJSpeech-1.1/wavs/LJ017-0070.wav|but his sporting operations did not prosper, and he became a needy man, always driven to desperate straits for cash. LJSpeech-1.1/wavs/LJ014-0020.wav|He was soon afterwards arrested on suspicion, and a search of his lodgings brought to light several garments saturated with blood; LJSpeech-1.1/wavs/LJ016-0020.wav|He never reached the cistern, but fell back into the yard, injuring his legs severely. LJSpeech-1.1/wavs/LJ045-0230.wav|when he was finally apprehended in the Texas Theatre. Although it is not fully corroborated by others who were present, LJSpeech-1.1/wavs/LJ035-0129.wav|and she must have run down the stairs ahead of Oswald and would probably have seen or heard him. LJSpeech-1.1/wavs/LJ008-0307.wav|afterwards express a wish to murder the Recorder for having kept them so long in suspense. LJSpeech-1.1/wavs/LJ008-0294.wav|nearly indefinitely deferred. 
LJSpeech-1.1/wavs/LJ047-0148.wav|On October twenty-five, LJSpeech-1.1/wavs/LJ008-0111.wav|They entered a "stone cold room," and were presently joined by the prisoner. LJSpeech-1.1/wavs/LJ034-0042.wav|that he could only testify with certainty that the print was less than three days old. LJSpeech-1.1/wavs/LJ037-0234.wav|Mrs. Mary Brock, the wife of a mechanic who worked at the station, was there at the time and she saw a white male, LJSpeech-1.1/wavs/LJ040-0002.wav|Chapter seven. Lee Harvey Oswald: Background and Possible Motives, Part one. LJSpeech-1.1/wavs/LJ045-0140.wav|The arguments he used to justify his use of the alias suggest that Oswald may have come to think that the whole world was becoming involved LJSpeech-1.1/wavs/LJ012-0035.wav|the number and names on watches, were carefully removed or obliterated after the goods passed out of his hands. LJSpeech-1.1/wavs/LJ012-0250.wav|On the seventh July, eighteen thirty-seven, LJSpeech-1.1/wavs/LJ016-0179.wav|contracted with sheriffs and conveners to work by the job. LJSpeech-1.1/wavs/LJ016-0138.wav|at a distance from the prison. LJSpeech-1.1/wavs/LJ027-0052.wav|These principles of homology are essential to a correct interpretation of the facts of morphology. LJSpeech-1.1/wavs/LJ031-0134.wav|On one occasion Mrs. Johnson, accompanied by two Secret Service agents, left the room to see Mrs. Kennedy and Mrs. Connally. LJSpeech-1.1/wavs/LJ019-0273.wav|which Sir Joshua Jebb told the committee he considered the proper elements of penal discipline. LJSpeech-1.1/wavs/LJ014-0110.wav|At the first the boxes were impounded, opened, and found to contain many of O'Connor's effects. LJSpeech-1.1/wavs/LJ034-0160.wav|on Brennan's subsequent certain identification of Lee Harvey Oswald as the man he saw fire the rifle. LJSpeech-1.1/wavs/LJ038-0199.wav|eleven. If I am alive and taken prisoner, LJSpeech-1.1/wavs/LJ014-0010.wav|yet he could not overcome the strange fascination it had for him, and remained by the side of the corpse till the stretcher came. LJSpeech-1.1/wavs/LJ033-0047.wav|I noticed when I went out that the light was on, end quote, LJSpeech-1.1/wavs/LJ040-0027.wav|He was never satisfied with anything. LJSpeech-1.1/wavs/LJ048-0228.wav|and others who were present say that no agent was inebriated or acted improperly. LJSpeech-1.1/wavs/LJ003-0111.wav|He was in consequence put out of the protection of their internal law, end quote. Their code was a subject of some curiosity. LJSpeech-1.1/wavs/LJ008-0258.wav|Let me retrace my steps, and speak more in detail of the treatment of the condemned in those bloodthirsty and brutally indifferent days, LJSpeech-1.1/wavs/LJ029-0022.wav|The original plan called for the President to spend only one day in the State, making whirlwind visits to Dallas, Fort Worth, San Antonio, and Houston. LJSpeech-1.1/wavs/LJ004-0045.wav|Mr. Sturges Bourne, Sir James Mackintosh, Sir James Scarlett, and William Wilberforce.
PyTorch/Classification/GPUNet/triton
triton
calculate_metrics
#!/usr/bin/env python3 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Using `calculate_metrics.py` script, you can obtain model accuracy/error metrics using defined `MetricsCalculator` class. Data provided to `MetricsCalculator` are obtained from dump files stored in directory pointed by `--dump-dir` argument. Above files are prepared by `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts. Output data is stored in csv file pointed by `--csv` argument. Example call: ```shell script python ./triton/calculate_metrics.py \ --dump-dir /results/dump_triton \ --csv /results/accuracy_results.csv \ --metrics metrics.py \ --metric-class-param1 value ``` """ import argparse import csv import logging import string from pathlib import Path # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file from .deployment_toolkit.dump import JsonDumpReader LOGGER = logging.getLogger("calculate_metrics") TOTAL_COLUMN_NAME = "_total_" def main(): logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False) parser.add_argument("--metrics", help="Path to python module containing metrics calculator", required=True) parser.add_argument("--csv", help="Path to csv file", required=True) parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True) args, *_ = parser.parse_known_args() MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator") ArgParserGenerator(MetricsCalculator).update_argparser(parser) args = parser.parse_args() LOGGER.info("args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator") metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args) reader = JsonDumpReader(args.dump_dir) for ids, x, y_true, y_pred in reader.iterate_over(["ids", "inputs", "labels", "outputs"]): ids = list(ids["ids"]) if ids is not None else None metrics_calculator.update(ids=ids, x=x, y_pred=y_pred, y_real=y_true) metrics = metrics_calculator.metrics metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])] if metric_names_with_space: raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}") csv_path = Path(args.csv) csv_path.parent.mkdir(parents=True, exist_ok=True) with csv_path.open("w") as csv_file: writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys())) writer.writeheader() writer.writerow(metrics) if __name__ == "__main__": main()
PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/bermuda
bermuda
onnx
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from pathlib import Path from typing import Dict, Optional, Union import numpy as np # pytype: disable=import-error import onnx import onnx.optimizer import onnx.shape_inference import onnxruntime from google.protobuf import text_format from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE # pytype: enable=import-error from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec from ..extensions import loaders, runners, savers from .utils import infer_precision LOGGER = logging.getLogger(__name__) def _value_info2tensor_spec(value_info: onnx.ValueInfoProto): onnx_data_type_map = {"float": "float32", "double": "float64"} elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower() dtype = onnx_data_type_map.get(elem_type_name, elem_type_name) def _get_dim(dim): which = dim.WhichOneof("value") if which is not None: # which is None when dim is None dim = getattr(dim, which) return None if isinstance(dim, (str, bytes)) else dim shape = value_info.type.tensor_type.shape shape = tuple([_get_dim(d) for d in shape.dim]) return TensorSpec(value_info.name, dtype=dtype, shape=shape) def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]: import networkx as nx # build directed graph nx_graph = nx.DiGraph() def _get_dtype(vi): t = vi.type if hasattr(t, "tensor_type"): type_id = t.tensor_type.elem_type else: raise NotImplementedError("Not implemented yet") return TENSOR_TYPE_TO_NP_TYPE[type_id] node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info} node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output} node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input} for node in onnx_graph.node: node_dtype = node_output2type.get("+".join(node.output), None) nx_graph.add_node( node.name, op=node.op_type, attr={a.name: a for a in node.attribute}, dtype=node_dtype, ) for input_name in node.input: prev_node = node_outputs2node.get(input_name, None) if prev_node: nx_graph.add_edge(prev_node.name, node.name) for input_node in onnx_graph.input: input_name = input_node.name nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node)) next_node = node_inputs2node.get(input_name, None) if next_node: nx_graph.add_edge(input_name, next_node.name) for output in onnx_graph.output: output_name = output.name nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output)) prev_node = node_outputs2node.get(output_name, None) if prev_node: nx_graph.add_edge(prev_node.name, output_name) else: LOGGER.warning(f"Could not find previous node for {output_name}") input_names = [n.name for n in onnx_graph.input] output_names = [n.name for n in onnx_graph.output] most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None)) if most_common_dtype is not None: 
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype] else: precision = None return precision class OnnxLoader(BaseLoader): def load(self, model_path: Union[str, Path], **_) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() model = onnx.load(model_path) onnx.checker.check_model(model) onnx.helper.strip_doc_string(model) model = onnx.shape_inference.infer_shapes(model) # TODO: probably modification of onnx model ios causes error on optimize # from onnx.utils import polish_model # model = polish_model(model) # run checker, docs strip, optimizer and shape inference inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input} outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output} precision = _infer_graph_precision(model.graph) return Model(model, precision, inputs, outputs) class OnnxSaver(BaseSaver): def __init__(self, as_text: bool = False): self._as_text = as_text def save(self, model: Model, model_path: Union[str, Path]) -> None: model_path = Path(model_path) LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}") model_path.parent.mkdir(parents=True, exist_ok=True) onnx_model: onnx.ModelProto = model.handle if self._as_text: with model_path.open("w") as f: f.write(text_format.MessageToString(onnx_model)) else: with model_path.open("wb") as f: f.write(onnx_model.SerializeToString()) """ ExecutionProviders on onnxruntime 1.4.0 ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'MIGraphXExecutionProvider', 'NGRAPHExecutionProvider', 'OpenVINOExecutionProvider', 'DnnlExecutionProvider', 'NupharExecutionProvider', 'VitisAIExecutionProvider', 'ArmNNExecutionProvider', 'ACLExecutionProvider', 'CPUExecutionProvider'] """ def _check_providers(providers): providers = providers or [] if not isinstance(providers, (list, tuple)): providers = [providers] available_providers = onnxruntime.get_available_providers() unavailable = set(providers) - set(available_providers) if unavailable: raise RuntimeError(f"Unavailable providers {unavailable}") return providers class OnnxRunner(BaseRunner): def __init__(self, verbose_runtime_logs: bool = False): self._providers = None self._verbose_runtime_logs = verbose_runtime_logs def init_inference(self, model: Model): assert isinstance(model.handle, onnx.ModelProto) return OnnxRunnerSession( model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs ) class OnnxRunnerSession(BaseRunnerSession): def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False): super().__init__(model) self._input_names = None self._output_names = None self._session = None self._providers = providers self._verbose_runtime_logs = verbose_runtime_logs self._old_env_values = {} def __enter__(self): self._old_env_values = self._set_env_variables() sess_options = onnxruntime.SessionOptions() # default session options if self._verbose_runtime_logs: sess_options.log_severity_level = 0 sess_options.log_verbosity_level = 1 LOGGER.info( f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}" ) self._input_names = list(self._model.inputs) self._output_names = list(self._model.outputs) model_payload = self._model.handle.SerializeToString() self._session = onnxruntime.InferenceSession( model_payload, providers=self._providers, sess_options=sess_options ) return self def __exit__(self, exc_type, exc_value, traceback): self._input_names = None self._output_names = None self._session = 
None self._recover_env_variables(self._old_env_values) def __call__(self, x: Dict[str, object]): feed_dict = {k: x[k] for k in self._input_names} y_pred = self._session.run(self._output_names, feed_dict) y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.ONNX.value, OnnxLoader) runners.register_extension(Format.ONNX.value, OnnxRunner) savers.register_extension(Format.ONNX.value, OnnxSaver)
PyTorch/Classification/ConvNets/triton/deployment_toolkit/library
library
__init__
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
TensorFlow/Classification/ConvNets/resnext101-32x4d
resnext101-32x4d
README
# ResNext101-32x4d for TensorFlow This repository provides a script and recipe to train the ResNext101-32x4d model to achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA. ResNext101-32x4d model for TensorFlow1 is no longer maintained and will soon become unavailable, please consider PyTorch or TensorFlow2 models as a substitute for your requirements. ## Table Of Contents * [Model overview](#model-overview) * [Model architecture](#model-architecture) * [Default configuration](#default-configuration) * [Optimizer](#optimizer) * [Data augmentation](#data-augmentation) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [Enabling TF32](#enabling-tf32) * [Setup](#setup) * [Requirements](#requirements) * [Quick Start Guide](#quick-start-guide) * [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Parameters](#parameters) * [The `main.py` script](#the-mainpy-script) * [Inference process](#inference-process) * [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Training accuracy: NVIDIA DGX A100 (8x A100 40GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-40gb) * [Training accuracy: NVIDIA DGX-1 (8x V100 16G)](#training-accuracy-nvidia-dgx-1-8x-v100-16g) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX A100 (8x A100 40GB)](#training-performance-nvidia-dgx-a100-8x-a100-40gb) * [Training performance: NVIDIA DGX-1 (8x V100 16G)](#training-performance-nvidia-dgx-1-8x-v100-16g) * [Training performance: NVIDIA DGX-2 (16x V100 32G)](#training-performance-nvidia-dgx-2-16x-v100-32g) * [Training time for 90 Epochs](#training-time-for-90-epochs) * [Training time: NVIDIA DGX A100 (8x A100 40G)](#training-time-nvidia-dgx-a100-8x-a100-40gb) * [Training time: NVIDIA DGX-1 (8x V100 16G)](#training-time-nvidia-dgx-1-8x-v100-16g) * [Training time: NVIDIA DGX-2 (16x V100 32G)](#training-time-nvidia-dgx-2-16x-v100-32g) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA DGX A100 (1x A100 40GB)](#inference-performance-nvidia-dgx-a100-1x-a100-40gb) * [Inference performance: NVIDIA DGX-1 (1x V100 16G)](#inference-performance-nvidia-dgx-1-1x-v100-16g) * [Inference performance: NVIDIA DGX-2 (1x V100 32G)](#inference-performance-nvidia-dgx-2-1x-v100-32g) * [Inference performance: NVIDIA T4 (1x T4 16G)](#inference-performance-nvidia-t4-1x-t4-16g) * [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview The ResNeXt101-32x4d is a model introduced in the [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/pdf/1611.05431.pdf) paper. It is based on a regular ResNet model, substituting 3x3 convolutions inside the bottleneck block for 3x3 grouped convolutions. 
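As a conceptual illustration of this substitution (and not the TensorFlow implementation used in this repository), a single first-stage ResNeXt bottleneck block can be sketched with a grouped convolution as follows; the channel counts are assumptions chosen to match the 32x4d configuration described below.
```python
# Conceptual sketch of a ResNeXt bottleneck block using grouped convolution.
# Illustrative only; this repository implements the model in TensorFlow 1.x.
import torch.nn as nn

cardinality, bottleneck_width = 32, 4
group_channels = cardinality * bottleneck_width  # 128 channels = 32 groups of width 4

resnext_bottleneck = nn.Sequential(
    nn.Conv2d(256, group_channels, kernel_size=1, bias=False),           # 1x1 reduce
    nn.Conv2d(group_channels, group_channels, kernel_size=3, padding=1,
              groups=cardinality, bias=False),                           # 3x3 grouped conv
    nn.Conv2d(group_channels, 256, kernel_size=1, bias=False),           # 1x1 expand
)
```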
The following performance optimizations were implemented in this model:
* JIT graph compilation with [XLA](https://www.tensorflow.org/xla)
* Multi-GPU training with [Horovod](https://github.com/horovod/horovod)
* Automated mixed precision [AMP](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html)

This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 3x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.

### Model architecture

![ResNextArch](./imgs/ResNeXtArch.png)

_Image source: [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/pdf/1611.05431.pdf)_

The image shows the difference between the ResNet bottleneck block and the ResNeXt bottleneck block. The ResNeXt bottleneck block splits a single convolution into multiple smaller, parallel convolutions. The ResNeXt101-32x4d model's cardinality equals 32 and its bottleneck width equals 4. This means that instead of a single convolution with 64 filters, 32 parallel convolutions with only 4 filters each are used.

### Default configuration

The following sections highlight the default configuration for the ResNext101-32x4d model.

#### Optimizer

This model uses the SGD optimizer with the following hyperparameters:
* Momentum (0.875).
* Learning rate (LR) = 0.256 for a batch size of 256; for other batch sizes, we linearly scale the learning rate.
* Learning rate schedule - we use a cosine LR schedule.
* For bigger batch sizes (512 and up), we use linear warmup of the learning rate during the first 5 epochs, according to [Training ImageNet in 1 hour](https://arxiv.org/abs/1706.02677).
* Weight decay: 6.103515625e-05 (1/16384).
* We do not apply weight decay to batch norm trainable parameters (gamma/bias).
* Label smoothing: 0.1.
* We train for:
  * 90 epochs -> 90 epochs is a standard for ResNet family networks.
  * 250 epochs -> best possible accuracy.
* For 250 epoch training, we also use [MixUp regularization](https://arxiv.org/pdf/1710.09412.pdf).

#### Data augmentation

This model uses the following data augmentation:
* For training:
  * Normalization.
  * Random resized crop to 224x224.
    * Scale from 8% to 100%.
    * Aspect ratio from 3/4 to 4/3.
  * Random horizontal flip.
* For inference:
  * Normalization.
  * Scale to 256x256.
  * Center crop to 224x224.

### Feature support matrix

The following features are supported by this model.

| Feature | ResNext101-32x4d TensorFlow |
|---------|-----------------------------|
| Multi-GPU training with [Horovod](https://github.com/horovod/horovod) | Yes |
| [NVIDIA DALI](https://docs.nvidia.com/deeplearning/dali/release-notes/index.html) | Yes |
| Automatic mixed precision (AMP) | Yes |

#### Features

Multi-GPU training with Horovod - Our model uses Horovod to implement efficient multi-GPU training with NCCL. For details, refer to the example sources in this repository or the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage).

NVIDIA DALI - DALI is a library that accelerates the data preparation pipeline. To accelerate your input pipeline, you only need to define your data loader with the DALI library. For details, refer to the example sources in this repository or the [DALI documentation](https://docs.nvidia.com/deeplearning/dali/index.html).
Automatic mixed precision (AMP) - Computation graph can be modified by TensorFlow on runtime to support mixed precision training. Detailed explanation of mixed precision can be found in the next section. ### Mixed precision training Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta and Turing GPUs automatically. The TensorFlow framework code makes all necessary model changes internally. In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling. For information about: - How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. - How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide. #### Enabling mixed precision Mixed precision is enabled in TensorFlow by using the Automatic Mixed Precision (TF-AMP) extension which casts variables to half-precision upon retrieval, while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In TensorFlow, loss scaling can be applied statically by using simple multiplication of loss by a constant value or automatically, by TF-AMP. Automatic mixed precision makes all the adjustments internally in TensorFlow, providing two benefits over manual operations. 
First, programmers need not modify network model code, reducing development and maintenance effort. Second, using AMP maintains forward and backward compatibility with all the APIs for defining and running TensorFlow models. To enable mixed precision, you can simply add the values to the environmental variables inside your training script: - Enable TF-AMP graph rewrite: ``` os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1" ``` - Enable Automated Mixed Precision: ``` os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1' ``` #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ## Setup The following section lists the requirements that you need to meet in order to use the ResNext101-32x4d model. ### Requirements This repository contains Dockerfile which extends the TensorFlow NGC container and encapsulates all dependencies. Aside from these dependencies, ensure you have the following software: - [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) - [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) - GPU-based architecture: - [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) - [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/) - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: * [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html), * [Accessing And Pulling From The NGC container registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry), * [Running TensorFlow](https://docs.nvidia.com/deeplearning/frameworks/tensorflow-release-notes/running.html#running). For those unable to use the [TensorFlow NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). ## Quick Start Guide To train your model using mixed precision or TF32 with Tensor Cores or FP32, perform the following steps using the default parameters of the ResNext101-32x4d model on the [ImageNet](http://www.image-net.org/) dataset. For the specifics concerning training and inference, see the [Advanced](#advanced) section. 1. Clone the repository. ``` git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/TensorFlow/Classification/ConvNets ``` 2. Download and preprocess the dataset. 
   The ResNext101-32x4d script operates on ImageNet 1k, a widely used image classification dataset from the ILSVRC challenge.

   * [Download the images](http://image-net.org/download-images)
   * Extract the training and validation data:
     ```bash
     mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
     tar -xvf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar
     find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done
     cd ..
     mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar
     ```
   * Preprocess the dataset to TFRecord format using the [script](https://github.com/tensorflow/models/blob/archive/research/inception/inception/data/build_imagenet_data.py). Additional metadata from the [authors' repository](https://github.com/tensorflow/models/tree/archive/research/inception/inception/data) might be required.

3. Build the ResNext101-32x4d TensorFlow NGC container.
   ```bash
   docker build . -t nvidia_rn50
   ```

4. Start an interactive session in the NGC container to run training/inference. After you build the container image, you can start an interactive CLI session with:
   ```bash
   nvidia-docker run --rm -it -v <path to imagenet>:/data/tfrecords --ipc=host nvidia_rn50
   ```

5. (Optional) Create index files to use DALI. To allow proper sharding in a multi-GPU environment, DALI has to create index files for the dataset. To create the index files, run inside the container:
   ```bash
   bash ./utils/dali_index.sh /data/tfrecords <index file store location>
   ```
   Index files can be created once and then reused. It is highly recommended to save them into a persistent location.

6. Start training. To run training for a standard configuration (as described in [Default configuration](#default-configuration): DGX1V, DGX2V, single GPU, FP16, FP32, 90, and 250 epochs), run one of the scripts in the `resnext101-32x4d/training` directory. Ensure ImageNet is mounted in the `/data/tfrecords` directory. For example, to train on DGX-1 for 90 epochs using AMP, run:

   `bash ./resnext101-32x4d/training/DGX1_RNxt101-32x4d_AMP_90E.sh /path/to/result /data`

   Additionally, features like DALI data preprocessing or TensorFlow XLA can be enabled with the following arguments when running those scripts:

   `bash ./resnext101-32x4d/training/DGX1_RNxt101-32x4d_AMP_90E.sh /path/to/result /data --xla --dali`

7. Start validation/evaluation. To evaluate the validation dataset located in `/data/tfrecords`, run `main.py` with `--mode=evaluate`. For example:

   `python main.py --arch=resnext101-32x4d --mode=evaluate --data_dir=/data/tfrecords --batch_size <batch size> --model_dir <model location> --results_dir <output location> [--xla] [--amp]`

   The optional `--xla` and `--amp` flags control XLA and AMP during evaluation.

## Advanced

The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code

In the root directory, the most important files are:
- `main.py`: the script that controls the logic of training and validation of the ResNet-like models
- `Dockerfile`: instructions for Docker to build a container with the basic set of dependencies required to run ResNet-like models for image classification
- `requirements.txt`: a set of extra Python requirements for running ResNet-like models

The `model/` directory contains the following modules used to define ResNet family models:
- `resnet.py`: the definition of the ResNet, ResNext, and SE-ResNext models
- `blocks/conv2d_block.py`: the definition of the 2D convolution block
- `blocks/resnet_bottleneck_block.py`: the definition of the ResNet-like bottleneck block
- `layers/*.py`: definitions of specific layers used in the ResNet-like model

The `utils/` directory contains the following utility modules:
- `cmdline_helper.py`: helper module for command line processing
- `data_utils.py`: module defining input data pipelines
- `dali_utils.py`: helper module for DALI
- `image_processing.py`: image processing and data augmentation functions
- `learning_rate.py`: definition of the learning rate schedule used
- `optimizers.py`: definitions of the custom optimizers used
- `hooks/*.py`: definitions of specific hooks allowing logging of the training and inference process

The `runtime/` directory contains the following module, which defines the mechanics of the training process:
- `runner.py`: module encapsulating the training, inference, and evaluation logic

### Parameters

#### The `main.py` script

The script for training and evaluating the ResNext101-32x4d model has a variety of parameters that control these processes.

```
usage: main.py [-h] [--arch {resnet50,resnext101-32x4d,se-resnext101-32x4d}]
               [--mode {train,train_and_evaluate,evaluate,predict,training_benchmark,inference_benchmark}]
               [--export_dir EXPORT_DIR] [--to_predict TO_PREDICT]
               --batch_size BATCH_SIZE [--num_iter NUM_ITER]
               [--run_iter RUN_ITER] [--iter_unit {epoch,batch}]
               [--warmup_steps WARMUP_STEPS] [--model_dir MODEL_DIR]
               [--results_dir RESULTS_DIR] [--log_filename LOG_FILENAME]
               [--display_every DISPLAY_EVERY] [--seed SEED]
               [--gpu_memory_fraction GPU_MEMORY_FRACTION] [--gpu_id GPU_ID]
               [--finetune_checkpoint FINETUNE_CHECKPOINT] [--use_final_conv]
               [--quant_delay QUANT_DELAY] [--quantize] [--use_qdq]
               [--symmetric] [--data_dir DATA_DIR]
               [--data_idx_dir DATA_IDX_DIR] [--dali]
               [--synthetic_data_size SYNTHETIC_DATA_SIZE] [--lr_init LR_INIT]
               [--lr_warmup_epochs LR_WARMUP_EPOCHS]
               [--weight_decay WEIGHT_DECAY] [--weight_init {fan_in,fan_out}]
               [--momentum MOMENTUM] [--label_smoothing LABEL_SMOOTHING]
               [--mixup MIXUP] [--cosine_lr] [--xla]
               [--data_format {NHWC,NCHW}] [--amp]
               [--static_loss_scale STATIC_LOSS_SCALE]

JoC-RN50v1.5-TF

optional arguments:
  -h, --help            show this help message and exit.
  --arch {resnet50,resnext101-32x4d,se-resnext101-32x4d}
                        Architecture of model to run.
  --mode {train,train_and_evaluate,evaluate,predict,training_benchmark,inference_benchmark}
                        The execution mode of the script.
  --export_dir EXPORT_DIR
                        Directory in which to write exported SavedModel.
  --to_predict TO_PREDICT
                        Path to file or directory of files to run prediction on.
  --batch_size BATCH_SIZE
                        Size of each minibatch per GPU.
  --num_iter NUM_ITER   Number of iterations to run.
  --run_iter RUN_ITER   Number of training iterations to run on single run.
  --iter_unit {epoch,batch}
                        Unit of iterations.
  --warmup_steps WARMUP_STEPS
                        Number of steps considered as warmup and not taken
                        into account for performance measurements.
  --model_dir MODEL_DIR
                        Directory in which to write model. If undefined,
                        results dir will be used.
  --results_dir RESULTS_DIR
                        Directory in which to write training logs, summaries
                        and checkpoints.
  --log_filename LOG_FILENAME
                        Name of the JSON file to which write the training log.
  --display_every DISPLAY_EVERY
                        How often (in batches) to print out running information.
  --seed SEED           Random seed.
  --gpu_memory_fraction GPU_MEMORY_FRACTION
                        Limit memory fraction used by training script for DALI.
  --gpu_id GPU_ID       Specify ID of the target GPU on multi-device platform.
                        Effective only for single-GPU mode.
  --finetune_checkpoint FINETUNE_CHECKPOINT
                        Path to pre-trained checkpoint which will be used for
                        fine-tuning.
  --use_final_conv      Use convolution operator instead of MLP as last layer.
  --quant_delay QUANT_DELAY
                        Number of steps to be run before quantization starts
                        to happen.
  --quantize            Quantize weights and activations during training.
                        (Defaults to Assymmetric quantization)
  --use_qdq             Use QDQV3 op instead of FakeQuantWithMinMaxVars op for
                        quantization. QDQv3 does only scaling.
  --symmetric           Quantize weights and activations during training using
                        symmetric quantization.

Dataset arguments:
  --data_dir DATA_DIR   Path to dataset in TFRecord format. Files should be
                        named 'train-*' and 'validation-*'.
  --data_idx_dir DATA_IDX_DIR
                        Path to index files for DALI. Files should be named
                        'train-*' and 'validation-*'.
  --dali                Enable DALI data input.
  --synthetic_data_size SYNTHETIC_DATA_SIZE
                        Dimension of image for synthetic dataset.

Training arguments:
  --lr_init LR_INIT     Initial value for the learning rate.
  --lr_warmup_epochs LR_WARMUP_EPOCHS
                        Number of warmup epochs for learning rate schedule.
  --weight_decay WEIGHT_DECAY
                        Weight Decay scale factor.
  --weight_init {fan_in,fan_out}
                        Model weight initialization method.
  --momentum MOMENTUM   SGD momentum value for the Momentum optimizer.
  --label_smoothing LABEL_SMOOTHING
                        The value of label smoothing.
  --mixup MIXUP         The alpha parameter for mixup (if 0 then mixup is not
                        applied).
  --cosine_lr           Use cosine learning rate schedule.

Generic optimization arguments:
  --xla                 Enable XLA (Accelerated Linear Algebra) computation
                        for improved performance.
  --data_format {NHWC,NCHW}
                        Data format used to do calculations.
  --amp                 Enable Automatic Mixed Precision to speedup
                        computation using tensor cores.

Automatic Mixed Precision arguments:
  --static_loss_scale STATIC_LOSS_SCALE
                        Use static loss scaling in FP32 AMP.
```

### Inference process

To run inference on a single example with a checkpoint and a model script, use:

`python main.py --arch=resnext101-32x4d --mode predict --model_dir <path to model> --to_predict <path to image> --results_dir <path to results>`

The optional `--xla` and `--amp` flags control XLA and AMP during inference.

## Performance

The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).

### Benchmarking

The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark

To benchmark the training performance on a specific batch size, run:

* For 1 GPU
  * FP32 / TF32

    `python ./main.py --arch=resnext101-32x4d --mode=training_benchmark --warmup_steps 200 --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>`

  * AMP

    `python ./main.py --arch=resnext101-32x4d --mode=training_benchmark --amp --warmup_steps 200 --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>`

* For multiple GPUs
  * FP32 / TF32

    `mpiexec --allow-run-as-root --bind-to socket -np <num_gpus> python ./main.py --arch=resnext101-32x4d --mode=training_benchmark --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>`

  * AMP

    `mpiexec --allow-run-as-root --bind-to socket -np <num_gpus> python ./main.py --arch=resnext101-32x4d --mode=training_benchmark --amp --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>`

Each of these scripts runs 200 warm-up iterations and measures the first epoch. To control the warmup and benchmark length, use the `--warmup_steps`, `--num_iter` and `--iter_unit` flags. Features like XLA or DALI can be controlled with the `--xla` and `--dali` flags. For proper throughput reporting, the value of `--num_iter` must be greater than the value of `--warmup_steps`.

Suggested batch sizes for training are 128 for mixed precision training and 64 for single precision training per single V100 16 GB.

If no `--data_dir=<path to imagenet>` flag is specified, then the benchmarks will use a synthetic dataset. The resolution of the synthetic images can be controlled with the `--synthetic_data_size` flag.

#### Inference performance benchmark

To benchmark the inference performance on a specific batch size, run:

* FP32 / TF32

  `python ./main.py --arch=resnext101-32x4d --mode=inference_benchmark --warmup_steps 20 --num_iter 100 --iter_unit batch --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>`

* AMP

  `python ./main.py --arch=resnext101-32x4d --mode=inference_benchmark --amp --warmup_steps 20 --num_iter 100 --iter_unit batch --batch_size <batch size> --data_dir=<path to imagenet> --results_dir=<path to results directory>`

By default, each of these scripts runs 20 warm-up iterations and measures the next 80 iterations. To control the warm-up and benchmark length, use the `--warmup_steps`, `--num_iter` and `--iter_unit` flags. If no `--data_dir=<path to imagenet>` flag is specified, then the benchmarks will use a synthetic dataset.

The benchmark can be automated with the `inference_benchmark.sh` script provided in `resnext101-32x4d`, by simply running:

`bash ./resnext101-32x4d/inference_benchmark.sh <data dir> <data idx dir>`

The `<data dir>` parameter refers to the input data directory (by default `/data/tfrecords` inside the container). By default, the benchmark tests the following configurations: **FP32**, **AMP**, **AMP + XLA** with different batch sizes. When the optional directory with the DALI index files `<data idx dir>` is specified, the benchmark executes an additional **DALI + AMP + XLA** configuration. For proper throughput reporting, the value of `--num_iter` must be greater than the value of `--warmup_steps`.

For a performance benchmark of the raw model, a synthetic dataset can be used. To use a synthetic dataset, pass the `--synthetic_data_size` flag instead of `--data_dir` to specify the input image size.
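For illustration, a possible synthetic-data invocation might look like the following sketch; the batch size of 128, the image size of 224, and the results directory are placeholder assumptions rather than values prescribed by this repository:

```bash
# Hypothetical example: inference benchmark on synthetic 224x224 images with AMP,
# no real ImageNet data required.
python ./main.py --arch=resnext101-32x4d --mode=inference_benchmark --amp \
    --warmup_steps 20 --num_iter 100 --iter_unit batch --batch_size 128 \
    --synthetic_data_size 224 --results_dir=/tmp/rnxt_inference_benchmark
```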
### Results The following sections provide details on how we achieved our performance and accuracy in training and inference. #### Training accuracy results ##### Training accuracy: NVIDIA DGX A100 (8x A100 40GB) Our results were obtained by running the `/resnet50v1.5/training/DGXA100_RN50_{PRECISION}_90E.sh` training script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX A100 (8x A100 40GB) GPUs. | Epochs | Batch Size / GPU | Accuracy - TF32 (top1) | Accuracy - mixed precision (top1) | |--------|------------------|-----------------|----------------------------| | 90 | 128 (TF32) / 256 (AMP) | 79.38 | 79.20 | ##### Training accuracy: NVIDIA DGX-1 (8x V100 16G) Our results were obtained by running the `/resnext101-32x4d/training/DGX1_RNxt101-32x4d_{PRECISION}_{EPOCHS}E.sh` training script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX-1 with (8x V100 16G) GPUs. | Epochs | Batch Size / GPU | Accuracy - FP32 | Accuracy - mixed precision | |--------|------------------|-----------------|----------------------------| | 90 | 64 (FP32) / 128 (AMP) | 79.35 | 79.30 | | 250 | 64 (FP32) / 128 (AMP) | 80.21 | 80.21 | **Example training loss plot** ![TrainingLoss](./imgs/train_loss.png) #### Training performance results ##### Training performance: NVIDIA DGX A100 (8x A100 40GB) Our results were obtained by running the `resnext101-32x4d/training/training_perf.sh` benchmark script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX A100 (8x A100 40GB) GPUs. Performance numbers (in images per second) were averaged over an entire training epoch. | GPUs | Batch Size / GPU | Throughput - TF32 + XLA | Throughput - mixed precision + XLA | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 + XLA| Weak scaling - mixed precision + XLA | |----|---------------|---------------|------------------------|-----------------|-----------|-------------------| | 1 | 128 (TF) / 256 (AMP) | 371 img/s | 1132 img/s | 3.05x | 1.00x | 1.00x | | 8 | 128 (TF) / 256 (AMP) | 2854 img/s | 8500 img/s | 2.98x | 7.69x | 7.51x | ##### Training performance: NVIDIA DGX-1 (8x V100 16G) Our results were obtained by running the `resnext101-32x4d/training/training_perf.sh` benchmark script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX-1 with (8x V100 16G) GPUs. Performance numbers (in images per second) were averaged over an entire training epoch. | GPUs | Batch Size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision | |----|---------------|---------------|------------------------|-----------------|-----------|-------------------| | 1 | 64 (FP32) / 128 (AMP) | 166 img/s | 566 img/s | 3.40x | 1.00x | 1.00x | | 8 | 64 (FP32) / 128 (AMP) | 1210 img/s | 4160 img/s | 3.44x | 7.29x | 7.35x | ##### Training performance: NVIDIA DGX-2 (16x V100 32G) Our results were obtained by running the `resnext101-32x4d/training/training_perf.sh` benchmark script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX-2 with (16x V100 32G) GPUs. 
Performance numbers (in images per second) were averaged over an entire training epoch. | GPUs | Batch Size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision | |----|---------------|---------------|-------------------------|-------|--------|--------| | 1 | 64 (FP32) / 128 (AMP) | 170 img/s | 572 img/s | 3.36x | 1.00x | 1.00x | | 16 | 64 (FP32) / 128 (AMP) | 2500 img/s | 7750 img/s | 3.10x | 14.70x | 13.55x | #### Training Time for 90 Epochs ##### Training time: NVIDIA DGX A100 (8x A100 40GB) Our results were estimated based on the [training performance results](#training-performance-nvidia-dgx-a100-8x-a100-40g) on NVIDIA DGX A100 with (8x A100 40G) GPUs. | GPUs | Time to train - mixed precision + XLA | Time to train - TF32 + XLA | |---|--------|---------| | 1 | ~35h | ~94h | | 8 | ~2h | ~5h | ##### Training time: NVIDIA DGX-1 (8x V100 16G) Our results were estimated based on the [training performance results](#training-performance-nvidia-dgx-1-8x-v100-16g) on NVIDIA DGX-1 with (8x V100 16G) GPUs. | GPUs | Time to train - mixed precision + XLA | Time to train - FP32 + XLA | |---|--------|---------| | 1 | ~56h | ~192h | | 8 | ~8h | ~27h | ##### Training time: NVIDIA DGX-2 (16x V100 32G) Our results were estimated based on the [training performance results](#training-performance-nvidia-dgx-2-16x-v100-32g) on NVIDIA DGX-2 with (16x V100 32G) GPUs. | GPUs | Time to train - mixed precision + XLA | Time to train - FP32 + XLA | |----|-------|-------| | 1 | ~55h | ~188h | | 16 | ~4h | ~12h | #### Inference performance results ##### Inference performance: NVIDIA DGX A100 (1x A100 40GB) Our results were obtained by running the `inference_benchmark.sh` inferencing benchmarking script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX A100 with (1x A100 40G) GPU. 
**TF32 Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 111.07 img/s | 9.04 ms | 9.05 ms | 9.10 ms | 9.45 ms | | 2 | 200.35 img/s | 10.01 ms | 10.05 ms | 10.08 ms | 10.24 ms | | 4 | 283.11 img/s | 14.15 ms | 14.36 ms | 14.43 ms | 14.65 ms | | 8 | 416.93 img/s | 19.19 ms | 19.64 ms | 19.90 ms | 20.14 ms | | 16 | 629.64 img/s | 25.44 ms | 25.82 ms | 25.97 ms | 26.51 ms | | 32 | 766.57 img/s | 41.83 ms | 42.30 ms | 42.65 ms | 43.45 ms | | 64 | 836.72 img/s | 76.50 ms | 77.07 ms | 77.44 ms | 78.72 ms | | 128 | 864.37 img/s | 148.27 ms | 148.54 ms | 148.93 ms | 149.62 ms | | 256 | 902.67 img/s | 283.60 ms | 284.57 ms | 285.02 ms | 285.74 ms | **TF32 Inference Latency + XLA** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 107.46 img/s | 9.34 ms | 9.36 ms | 9.40 ms | 9.95 ms | | 2 | 192.54 img/s | 10.42 ms | 10.48 ms | 10.54 ms | 11.21 ms | | 4 | 280.89 img/s | 14.26 ms | 14.41 ms | 14.53 ms | 14.94 ms | | 8 | 387.41 img/s | 20.65 ms | 21.19 ms | 21.37 ms | 21.74 ms | | 16 | 676.19 img/s | 23.67 ms | 24.34 ms | 24.55 ms | 25.61 ms | | 32 | 902.44 img/s | 35.46 ms | 36.22 ms | 36.40 ms | 37.00 ms | | 64 | 1028.06 img/s | 62.34 ms | 63.46 ms | 64.38 ms | 72.65 ms | | 128 | 1096.39 img/s | 116.80 ms | 118.10 ms | 118.82 ms | 121.00 ms | | 256 | 1153.50 img/s | 221.93 ms | 223.18 ms | 223.49 ms | 223.90 ms | **Mixed Precision Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 127.96 img/s | 7.84 ms | 7.88 ms | 7.92 ms | 8.00 ms | | 2 | 243.62 img/s | 8.24 ms | 8.28 ms | 8.31 ms | 8.58 ms | | 4 | 491.02 img/s | 8.18 ms | 8.36 ms | 8.43 ms | 8.99 ms | | 8 | 952.95 img/s | 8.40 ms | 8.80 ms | 8.94 ms | 9.31 ms | | 16 | 1625.38 img/s | 9.85 ms | 10.19 ms | 10.45 ms | 10.86 ms | | 32 | 1991.14 img/s | 16.22 ms | 16.46 ms | 16.78 ms | 17.59 ms | | 64 | 2138.11 img/s | 30.08 ms | 31.02 ms | 31.34 ms | 32.27 ms | | 128 | 2140.59 img/s | 59.81 ms | 61.37 ms | 61.77 ms | 62.53 ms | | 256 | 2185.86 img/s | 117.12 ms | 118.35 ms | 118.72 ms | 119.84 ms | **Mixed Precision Inference Latency + XLA** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 86.02 img/s | 11.66 ms | 11.78 ms | 11.82 ms | 12.18 ms | | 2 | 166.91 img/s | 12.01 ms | 12.10 ms | 12.14 ms | 12.25 ms | | 4 | 330.75 img/s | 12.10 ms | 12.45 ms | 12.87 ms | 13.27 ms | | 8 | 675.53 img/s | 11.84 ms | 12.08 ms | 12.24 ms | 12.59 ms | | 16 | 1234.52 img/s | 13.06 ms | 13.89 ms | 14.11 ms | 15.01 ms | | 32 | 2501.78 img/s | 13.09 ms | 14.14 ms | 15.25 ms | 25.57 ms | | 64 | 3049.35 img/s | 21.12 ms | 22.24 ms | 23.27 ms | 28.62 ms | | 128 | 3324.24 img/s | 38.98 ms | 40.07 ms | 40.81 ms | 51.07 ms | | 256 | 3166.28 img/s | 82.05 ms | 94.93 ms | 101.78 ms | 119.88 ms | ##### Inference performance: NVIDIA DGX-1 (1x V100 16G) Our results were obtained by running the `inference_benchmark.sh` inferencing benchmarking script in the [TensorFlow 20.06-tf1-py3 NGC 
container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX-1 with (1x V100 16G) GPU. **FP32 Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 98.34 img/s | 10.24 ms | 10.27 ms | 10.32 ms | 12.89 ms | | 2 | 167.04 img/s | 11.98 ms | 12.17 ms | 12.24 ms | 12.59 ms | | 4 | 214.18 img/s | 18.68 ms | 18.80 ms | 18.88 ms | 19.73 ms | | 8 | 259.96 img/s | 30.78 ms | 31.04 ms | 31.08 ms | 31.44 ms | | 16 | 350.71 img/s | 45.63 ms | 45.81 ms | 45.88 ms | 47.96 ms | | 32 | 407.80 img/s | 78.74 ms | 78.66 ms | 79.04 ms | 110.32 ms | | 64 | 461.88 img/s | 138.57 ms | 139.34 ms | 139.68 ms | 141.54 ms | | 128 | 493.61 img/s | 259.57 ms | 260.38 ms | 260.84 ms | 262.40 ms | **Mixed Precision Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 84.74 img/s | 11.85 ms | 11.95 ms | 12.02 ms | 12.17 ms | | 2 | 183.64 img/s | 10.94 ms | 11.08 ms | 11.18 ms | 11.36 ms | | 4 | 359.91 img/s | 11.17 ms | 11.35 ms | 11.46 ms | 11.80 ms | | 8 | 736.61 img/s | 10.87 ms | 11.17 ms | 11.31 ms | 11.46 ms | | 16 | 1058.59 img/s | 15.22 ms | 15.30 ms | 15.47 ms | 16.51 ms | | 32 | 1152.14 img/s | 28.03 ms | 27.99 ms | 28.11 ms | 29.55 ms | | 64 | 1275.35 img/s | 50.38 ms | 50.41 ms | 50.52 ms | 51.39 ms | | 128 | 1347.11 img/s | 95.02 ms | 95.51 ms | 95.70 ms | 96.29 ms | **Mixed Precision Inference Latency + XLA** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 59.84 img/s | 16.77 ms | 16.95 ms | 17.00 ms | 17.23 ms | | 2 | 120.41 img/s | 16.66 ms | 16.90 ms | 16.97 ms | 17.21 ms | | 4 | 242.75 img/s | 16.48 ms | 16.96 ms | 17.10 ms | 17.55 ms | | 8 | 466.47 img/s | 17.15 ms | 17.50 ms | 17.65 ms | 17.94 ms | | 16 | 861.72 img/s | 18.69 ms | 19.19 ms | 19.33 ms | 19.68 ms | | 32 | 1472.21 img/s | 22.06 ms | 22.32 ms | 22.82 ms | 23.91 ms | | 64 | 1728.76 img/s | 37.24 ms | 37.49 ms | 37.65 ms | 38.08 ms | | 128 | 1892.97 img/s | 67.62 ms | 68.24 ms | 68.49 ms | 69.47 ms | | ##### Inference performance: NVIDIA DGX-2 (1x V100 32G) Our results were obtained by running the `inference_benchmark.sh` inferencing benchmarking script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA DGX-2 with (1x V100 32G) GPU. 
**FP32 Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 96.91 img/s | 10.38 ms | 10.46 ms | 10.53 ms | 11.32 ms | | 2 | 163.02 img/s | 12.33 ms | 12.54 ms | 12.77 ms | 13.45 ms | | 4 | 206.76 img/s | 19.35 ms | 19.52 ms | 19.63 ms | 20.09 ms | | 8 | 249.68 img/s | 32.05 ms | 32.24 ms | 32.31 ms | 33.26 ms | | 16 | 330.36 img/s | 48.43 ms | 48.63 ms | 48.69 ms | 49.03 ms | | 32 | 399.97 img/s | 80.00 ms | 80.44 ms | 80.62 ms | 81.28 ms | | 64 | 481.88 img/s | 132.94 ms | 133.05 ms | 133.16 ms | 133.71 ms | | 128 | 519.85 img/s | 246.22 ms | 247.09 ms | 247.71 ms | 250.49 ms | **Mixed Precision Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 108.86 img/s | 9.24 ms | 9.36 ms | 9.42 ms | 9.57 ms | | 2 | 215.01 img/s | 9.36 ms | 9.42 ms | 9.46 ms | 9.68 ms | | 4 | 422.09 img/s | 9.48 ms | 9.70 ms | 9.80 ms | 10.10 ms | | 8 | 791.52 img/s | 10.12 ms | 10.24 ms | 10.32 ms | 10.58 ms | | 16 | 1064.30 img/s | 15.16 ms | 15.27 ms | 15.32 ms | 17.23 ms | | 32 | 1190.90 img/s | 27.11 ms | 27.00 ms | 27.10 ms | 27.97 ms | | 64 | 1319.63 img/s | 48.49 ms | 48.73 ms | 48.82 ms | 49.32 ms | | 128 | 1397.36 img/s | 91.60 ms | 91.93 ms | 92.07 ms | 92.61 ms | **Mixed Precision Inference Latency + XLA** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 76.34 img/s | 13.16 ms | 13.37 ms | 13.49 ms | 13.74 ms | | 2 | 150.90 img/s | 13.31 ms | 13.54 ms | 13.61 ms | 13.87 ms | | 4 | 284.88 img/s | 14.10 ms | 15.28 ms | 15.38 ms | 15.68 ms | | 8 | 587.77 img/s | 13.61 ms | 13.87 ms | 13.94 ms | 14.06 ms | | 16 | 1089.95 img/s | 14.80 ms | 14.91 ms | 15.04 ms | 15.46 ms | | 32 | 1503.51 img/s | 21.55 ms | 21.33 ms | 21.38 ms | 21.91 ms | | 64 | 1765.86 img/s | 36.47 ms | 36.39 ms | 36.51 ms | 37.15 ms | | 128 | 2003.04 img/s | 63.91 ms | 64.95 ms | 65.07 ms | 65.47 ms | | ##### Inference performance: NVIDIA T4 (1x T4 16G) Our results were obtained by running the `inference_benchmark.sh` inferencing benchmarking script in the [TensorFlow 20.06-tf1-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container on NVIDIA T4 with (1x T4 16G) GPU. 
**FP32 Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 31.92 img/s | 31.42 ms | 31.58 ms | 31.78 ms | 37.56 ms | | 2 | 45.62 img/s | 43.92 ms | 44.83 ms | 45.80 ms | 46.99 ms | | 4 | 70.42 img/s | 56.80 ms | 57.14 ms | 57.47 ms | 59.30 ms | | 8 | 85.68 img/s | 93.36 ms | 93.66 ms | 93.76 ms | 94.15 ms | | 16 | 99.58 img/s | 160.65 ms | 160.91 ms | 161.39 ms | 162.34 ms | | 32 | 105.04 img/s | 304.63 ms | 305.53 ms | 305.96 ms | 307.22 ms | | 64 | 108.31 img/s | 590.85 ms | 591.31 ms | 591.70 ms | 593.23 ms | | 128 | 110.05 img/s | 1163.04 ms | 1163.52 ms | 1163.75 ms | 1164.24 ms | **Mixed Precision Inference Latency** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 80.61 img/s | 12.50 ms | 12.56 ms | 12.66 ms | 13.54 ms | | 2 | 104.47 img/s | 19.23 ms | 19.73 ms | 19.92 ms | 20.68 ms | | 4 | 143.68 img/s | 27.91 ms | 28.42 ms | 28.71 ms | 29.47 ms | | 8 | 176.65 img/s | 45.29 ms | 45.93 ms | 46.15 ms | 46.75 ms | | 16 | 203.55 img/s | 78.60 ms | 78.95 ms | 79.25 ms | 79.74 ms | | 32 | 209.77 img/s | 152.54 ms | 153.41 ms | 153.75 ms | 154.82 ms | | 64 | 222.97 img/s | 287.03 ms | 287.91 ms | 288.27 ms | 289.56 ms | | 128 | 226.19 img/s | 565.89 ms | 566.21 ms | 566.38 ms | 567.52 ms | **Mixed Precision Inference Latency + XLA** |**Batch Size**|**Avg throughput**|**Avg latency**|**90% Latency**|**95% Latency**|**99% Latency**| |--------------|------------------|---------------|---------------|---------------|---------------| | 1 | 54.68 img/s | 18.40 ms | 19.17 ms | 19.34 ms | 19.53 ms | | 2 | 102.20 img/s | 19.67 ms | 20.37 ms | 20.55 ms | 24.65 ms | | 4 | 153.96 img/s | 26.05 ms | 26.31 ms | 27.01 ms | 28.96 ms | | 8 | 177.98 img/s | 44.94 ms | 45.25 ms | 45.43 ms | 45.66 ms | | 16 | 237.70 img/s | 67.31 ms | 68.35 ms | 68.87 ms | 69.63 ms | | 32 | 241.79 img/s | 132.34 ms | 133.18 ms | 133.87 ms | 134.92 ms | | 64 | 263.80 img/s | 242.60 ms | 244.25 ms | 245.27 ms | 246.56 ms | | 128 | 272.17 img/s | 470.29 ms | 471.29 ms | 471.78 ms | 473.61 ms | ## Release notes ### Changelog April 2023 - Ceased maintenance of ConvNets in TensorFlow1 June 2020 - Initial release August 2020 - Updated command line argument names - Added support for syntetic dataset with different image size January 2022 - Added barrier at the end of multiprocess run ### Known issues Performance without XLA enabled is low due to BN + ReLU fusion bug.
TensorFlow/Classification/ConvNets/model/blocks
blocks
__init__
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from model.blocks.conv2d_block import conv2d_block from model.blocks.resnet_bottleneck_block import bottleneck_block __all__ = [ # conv + bn + act block 'conv2d_block', # resnet blocks 'bottleneck_block' ]
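As a quick, hedged usage sketch (assuming the ConvNets repository root is on `PYTHONPATH` and TensorFlow is installed), the re-exports above let both builders be imported directly from `model.blocks`:

```python
# Minimal import check; the actual builder signatures live in
# model/blocks/conv2d_block.py and model/blocks/resnet_bottleneck_block.py.
from model.blocks import conv2d_block, bottleneck_block

print(conv2d_block.__module__)      # model.blocks.conv2d_block
print(bottleneck_block.__module__)  # model.blocks.resnet_bottleneck_block
```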
PyTorch/SpeechSynthesis/FastPitch/common/text
text
abbreviations
import re _no_period_re = re.compile(r'(No[.])(?=[ ]?[0-9])') _percent_re = re.compile(r'([ ]?[%])') _half_re = re.compile('([0-9]½)|(½)') _url_re = re.compile(r'([a-zA-Z])\.(com|gov|org)') # List of (regular expression, replacement) pairs for abbreviations: _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ ('mrs', 'misess'), ('ms', 'miss'), ('mr', 'mister'), ('dr', 'doctor'), ('st', 'saint'), ('co', 'company'), ('jr', 'junior'), ('maj', 'major'), ('gen', 'general'), ('drs', 'doctors'), ('rev', 'reverend'), ('lt', 'lieutenant'), ('hon', 'honorable'), ('sgt', 'sergeant'), ('capt', 'captain'), ('esq', 'esquire'), ('ltd', 'limited'), ('col', 'colonel'), ('ft', 'fort'), ('sen', 'senator'), ('etc', 'et cetera'), ]] def _expand_no_period(m): word = m.group(0) if word[0] == 'N': return 'Number' return 'number' def _expand_percent(m): return ' percent' def _expand_half(m): word = m.group(1) if word is None: return 'half' return word[0] + ' and a half' def _expand_urls(m): return f'{m.group(1)} dot {m.group(2)}' def normalize_abbreviations(text): text = re.sub(_no_period_re, _expand_no_period, text) text = re.sub(_percent_re, _expand_percent, text) text = re.sub(_half_re, _expand_half, text) text = re.sub('&', ' and ', text) text = re.sub('@', ' at ', text) text = re.sub(_url_re, _expand_urls, text) for regex, replacement in _abbreviations: text = re.sub(regex, replacement, text) return text
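A small, hedged usage sketch of the normalizer above; the import path assumes the FastPitch repository root is on `PYTHONPATH`, and the expected outputs follow directly from the regular expressions defined in this module:

```python
# Quick check of normalize_abbreviations (import path assumes the FastPitch root).
from common.text.abbreviations import normalize_abbreviations

# Abbreviation and URL expansion: "Dr." -> "doctor", "example.com" -> "example dot com"
print(normalize_abbreviations("Dr. Smith works at example.com"))
# doctor Smith works at example dot com

# "No." followed by a digit becomes "Number", "%" becomes " percent"
print(normalize_abbreviations("No. 7 costs 50%"))
# Number 7 costs 50 percent
```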
DGLPyTorch/DrugDiscovery/SE3Transformer/scripts
scripts
benchmark_train_multi_gpu
#!/usr/bin/env bash # Script to benchmark multi-GPU training performance, with bases precomputation # CLI args with defaults BATCH_SIZE=${1:-240} AMP=${2:-true} python -m torch.distributed.run --nnodes=1 --nproc_per_node=gpu --max_restarts 0 --module \ se3_transformer.runtime.training \ --amp "$AMP" \ --batch_size "$BATCH_SIZE" \ --epochs 16 \ --use_layer_norm \ --norm \ --save_ckpt_path model_qm9.pth \ --task homo \ --precompute_bases \ --seed 42 \ --benchmark
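For reference, a hedged invocation sketch from the SE3Transformer repository root; the `scripts/` prefix and `.sh` extension are assumptions based on this file's location, and the two positional arguments map to `BATCH_SIZE` and `AMP`:

```bash
# Benchmark with a per-run batch size of 120 and AMP disabled
bash scripts/benchmark_train_multi_gpu.sh 120 false

# Defaults (batch size 240, AMP enabled) when no arguments are given
bash scripts/benchmark_train_multi_gpu.sh
```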
PyTorch/LanguageModeling/Transformer-XL/pytorch/scripts/tests
tests
train_full
#!/bin/bash # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e REPO_DIR=${REPO_DIR:-"/workspace/transformer-xl/pytorch/"} REFERENCE_FILE=$REPO_DIR/scripts/tests/reference_training_throughput MATH=$1 if [[ ${MATH} != "fp16" && ${MATH} != "fp32" ]]; then echo "Unsupported option for MATH, use either 'fp16' or 'fp32'" exit 1 fi PERF_TOLERANCE=0.9 GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader |uniq) echo 'GPU_NAME:' "${GPU_NAME}" GPU_COUNT=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader |wc -l) echo 'GPU_COUNT:' "${GPU_COUNT}" if (( GPU_COUNT == 16 )); then SYSTEM=dgx2 else SYSTEM=dgx1 fi REFERENCE_PERF=$(grep "${MATH},${GPU_COUNT},${GPU_NAME}" \ ${REFERENCE_FILE} | \cut -f 4 -d ',') if [ -z "${REFERENCE_PERF}" ]; then echo "WARNING: COULD NOT FIND REFERENCE PERFORMANCE FOR EXECUTED CONFIG" TARGET_PERF='' else PERF_THRESHOLD=$(awk 'BEGIN {print ('"${REFERENCE_PERF}"' * '"${PERF_TOLERANCE}"')}') TARGET_PERF='--target_throughput '${PERF_THRESHOLD} fi cd $REPO_DIR bash run_wt103_base.sh train "${GPU_COUNT}" \ --config ${SYSTEM}_${GPU_COUNT}gpu_${MATH} \ --debug \ --target_perplexity 23.4 \ --log_interval 1 \ ${TARGET_PERF}
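A hedged example of launching this test; the `scripts/tests/` path and `.sh` extension are assumptions based on this file's location, and `REPO_DIR` falls back to `/workspace/transformer-xl/pytorch/` when unset:

```bash
# Run the convergence/performance test with FP16 math
bash scripts/tests/train_full.sh fp16

# Point REPO_DIR at a custom checkout and test FP32
REPO_DIR=/path/to/transformer-xl/pytorch bash scripts/tests/train_full.sh fp32
```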
TensorFlow2/Classification/ConvNets
ConvNets
requirements
six google-api-python-client>=1.6.7 google-cloud-bigquery>=0.31.0 kaggle>=1.3.9 numpy>=1.15.4 oauth2client>=4.1.2 pandas>=0.22.0 psutil>=5.4.3 py-cpuinfo>=3.3.0 scipy>=0.19.1 tensorflow-hub>=0.6.0 tensorflow-model-optimization>=0.2.1 tensorflow-datasets tensorflow-addons dataclasses gin-config tf_slim>=1.1.0 typing sentencepiece Cython matplotlib opencv-python-headless pyyaml Pillow -e git+https://github.com/cocodataset/cocoapi#egg=pycocotools&subdirectory=PythonAPI
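A minimal installation sketch, assuming this list is saved as `requirements.txt` and run inside a container or environment that already provides TensorFlow 2:

```bash
pip install --no-cache-dir -r requirements.txt
```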
CUDA-Optimized/FastSpeech/tacotron2
tacotron2
__init__
# BSD 3-Clause License # Copyright (c) 2018-2020, NVIDIA Corporation # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """https://github.com/NVIDIA/tacotron2""" import tacotron2.model
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
config_util_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.utils.config_util.""" import os import tensorflow as tf from google.protobuf import text_format from object_detection.protos import eval_pb2 from object_detection.protos import image_resizer_pb2 from object_detection.protos import input_reader_pb2 from object_detection.protos import model_pb2 from object_detection.protos import pipeline_pb2 from object_detection.protos import train_pb2 from object_detection.utils import config_util def _write_config(config, config_path): """Writes a config object to disk.""" config_text = text_format.MessageToString(config) with tf.gfile.Open(config_path, "wb") as f: f.write(config_text) def _update_optimizer_with_constant_learning_rate(optimizer, learning_rate): """Adds a new constant learning rate.""" constant_lr = optimizer.learning_rate.constant_learning_rate constant_lr.learning_rate = learning_rate def _update_optimizer_with_exponential_decay_learning_rate( optimizer, learning_rate): """Adds a new exponential decay learning rate.""" exponential_lr = optimizer.learning_rate.exponential_decay_learning_rate exponential_lr.initial_learning_rate = learning_rate def _update_optimizer_with_manual_step_learning_rate( optimizer, initial_learning_rate, learning_rate_scaling): """Adds a learning rate schedule.""" manual_lr = optimizer.learning_rate.manual_step_learning_rate manual_lr.initial_learning_rate = initial_learning_rate for i in range(3): schedule = manual_lr.schedule.add() schedule.learning_rate = initial_learning_rate * learning_rate_scaling**i def _update_optimizer_with_cosine_decay_learning_rate( optimizer, learning_rate, warmup_learning_rate): """Adds a new cosine decay learning rate.""" cosine_lr = optimizer.learning_rate.cosine_decay_learning_rate cosine_lr.learning_rate_base = learning_rate cosine_lr.warmup_learning_rate = warmup_learning_rate class ConfigUtilTest(tf.test.TestCase): def _create_and_load_test_configs(self, pipeline_config): pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") _write_config(pipeline_config, pipeline_config_path) return config_util.get_configs_from_pipeline_file(pipeline_config_path) def test_get_configs_from_pipeline_file(self): """Test that proto configs can be read from pipeline config file.""" pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.faster_rcnn.num_classes = 10 pipeline_config.train_config.batch_size = 32 pipeline_config.train_input_reader.label_map_path = "path/to/label_map" pipeline_config.eval_config.num_examples = 20 pipeline_config.eval_input_reader.add().queue_capacity = 100 _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) self.assertProtoEquals(pipeline_config.model, configs["model"]) 
self.assertProtoEquals(pipeline_config.train_config, configs["train_config"]) self.assertProtoEquals(pipeline_config.train_input_reader, configs["train_input_config"]) self.assertProtoEquals(pipeline_config.eval_config, configs["eval_config"]) self.assertProtoEquals(pipeline_config.eval_input_reader, configs["eval_input_configs"]) def test_create_configs_from_pipeline_proto(self): """Tests creating configs dictionary from pipeline proto.""" pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.faster_rcnn.num_classes = 10 pipeline_config.train_config.batch_size = 32 pipeline_config.train_input_reader.label_map_path = "path/to/label_map" pipeline_config.eval_config.num_examples = 20 pipeline_config.eval_input_reader.add().queue_capacity = 100 configs = config_util.create_configs_from_pipeline_proto(pipeline_config) self.assertProtoEquals(pipeline_config.model, configs["model"]) self.assertProtoEquals(pipeline_config.train_config, configs["train_config"]) self.assertProtoEquals(pipeline_config.train_input_reader, configs["train_input_config"]) self.assertProtoEquals(pipeline_config.eval_config, configs["eval_config"]) self.assertProtoEquals(pipeline_config.eval_input_reader, configs["eval_input_configs"]) def test_create_pipeline_proto_from_configs(self): """Tests that proto can be reconstructed from configs dictionary.""" pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.faster_rcnn.num_classes = 10 pipeline_config.train_config.batch_size = 32 pipeline_config.train_input_reader.label_map_path = "path/to/label_map" pipeline_config.eval_config.num_examples = 20 pipeline_config.eval_input_reader.add().queue_capacity = 100 _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) pipeline_config_reconstructed = ( config_util.create_pipeline_proto_from_configs(configs)) self.assertEqual(pipeline_config, pipeline_config_reconstructed) def test_save_pipeline_config(self): """Tests that the pipeline config is properly saved to disk.""" pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.faster_rcnn.num_classes = 10 pipeline_config.train_config.batch_size = 32 pipeline_config.train_input_reader.label_map_path = "path/to/label_map" pipeline_config.eval_config.num_examples = 20 pipeline_config.eval_input_reader.add().queue_capacity = 100 config_util.save_pipeline_config(pipeline_config, self.get_temp_dir()) configs = config_util.get_configs_from_pipeline_file( os.path.join(self.get_temp_dir(), "pipeline.config")) pipeline_config_reconstructed = ( config_util.create_pipeline_proto_from_configs(configs)) self.assertEqual(pipeline_config, pipeline_config_reconstructed) def test_get_configs_from_multiple_files(self): """Tests that proto configs can be read from multiple files.""" temp_dir = self.get_temp_dir() # Write model config file. model_config_path = os.path.join(temp_dir, "model.config") model = model_pb2.DetectionModel() model.faster_rcnn.num_classes = 10 _write_config(model, model_config_path) # Write train config file. train_config_path = os.path.join(temp_dir, "train.config") train_config = train_config = train_pb2.TrainConfig() train_config.batch_size = 32 _write_config(train_config, train_config_path) # Write train input config file. 
train_input_config_path = os.path.join(temp_dir, "train_input.config") train_input_config = input_reader_pb2.InputReader() train_input_config.label_map_path = "path/to/label_map" _write_config(train_input_config, train_input_config_path) # Write eval config file. eval_config_path = os.path.join(temp_dir, "eval.config") eval_config = eval_pb2.EvalConfig() eval_config.num_examples = 20 _write_config(eval_config, eval_config_path) # Write eval input config file. eval_input_config_path = os.path.join(temp_dir, "eval_input.config") eval_input_config = input_reader_pb2.InputReader() eval_input_config.label_map_path = "path/to/another/label_map" _write_config(eval_input_config, eval_input_config_path) configs = config_util.get_configs_from_multiple_files( model_config_path=model_config_path, train_config_path=train_config_path, train_input_config_path=train_input_config_path, eval_config_path=eval_config_path, eval_input_config_path=eval_input_config_path) self.assertProtoEquals(model, configs["model"]) self.assertProtoEquals(train_config, configs["train_config"]) self.assertProtoEquals(train_input_config, configs["train_input_config"]) self.assertProtoEquals(eval_config, configs["eval_config"]) self.assertProtoEquals(eval_input_config, configs["eval_input_configs"][0]) def _assertOptimizerWithNewLearningRate(self, optimizer_name): """Asserts successful updating of all learning rate schemes.""" original_learning_rate = 0.7 learning_rate_scaling = 0.1 warmup_learning_rate = 0.07 hparams = tf.contrib.training.HParams(learning_rate=0.15) pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") # Constant learning rate. pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name) _update_optimizer_with_constant_learning_rate(optimizer, original_learning_rate) _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) configs = config_util.merge_external_params_with_configs(configs, hparams) optimizer = getattr(configs["train_config"].optimizer, optimizer_name) constant_lr = optimizer.learning_rate.constant_learning_rate self.assertAlmostEqual(hparams.learning_rate, constant_lr.learning_rate) # Exponential decay learning rate. pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name) _update_optimizer_with_exponential_decay_learning_rate( optimizer, original_learning_rate) _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) configs = config_util.merge_external_params_with_configs(configs, hparams) optimizer = getattr(configs["train_config"].optimizer, optimizer_name) exponential_lr = optimizer.learning_rate.exponential_decay_learning_rate self.assertAlmostEqual(hparams.learning_rate, exponential_lr.initial_learning_rate) # Manual step learning rate. 
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name) _update_optimizer_with_manual_step_learning_rate( optimizer, original_learning_rate, learning_rate_scaling) _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) configs = config_util.merge_external_params_with_configs(configs, hparams) optimizer = getattr(configs["train_config"].optimizer, optimizer_name) manual_lr = optimizer.learning_rate.manual_step_learning_rate self.assertAlmostEqual(hparams.learning_rate, manual_lr.initial_learning_rate) for i, schedule in enumerate(manual_lr.schedule): self.assertAlmostEqual(hparams.learning_rate * learning_rate_scaling**i, schedule.learning_rate) # Cosine decay learning rate. pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() optimizer = getattr(pipeline_config.train_config.optimizer, optimizer_name) _update_optimizer_with_cosine_decay_learning_rate(optimizer, original_learning_rate, warmup_learning_rate) _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) configs = config_util.merge_external_params_with_configs(configs, hparams) optimizer = getattr(configs["train_config"].optimizer, optimizer_name) cosine_lr = optimizer.learning_rate.cosine_decay_learning_rate self.assertAlmostEqual(hparams.learning_rate, cosine_lr.learning_rate_base) warmup_scale_factor = warmup_learning_rate / original_learning_rate self.assertAlmostEqual(hparams.learning_rate * warmup_scale_factor, cosine_lr.warmup_learning_rate) def testRMSPropWithNewLearingRate(self): """Tests new learning rates for RMSProp Optimizer.""" self._assertOptimizerWithNewLearningRate("rms_prop_optimizer") def testMomentumOptimizerWithNewLearningRate(self): """Tests new learning rates for Momentum Optimizer.""" self._assertOptimizerWithNewLearningRate("momentum_optimizer") def testAdamOptimizerWithNewLearningRate(self): """Tests new learning rates for Adam Optimizer.""" self._assertOptimizerWithNewLearningRate("adam_optimizer") def testGenericConfigOverride(self): """Tests generic config overrides for all top-level configs.""" # Set one parameter for each of the top-level pipeline configs: pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.ssd.num_classes = 1 pipeline_config.train_config.batch_size = 1 pipeline_config.eval_config.num_visualizations = 1 pipeline_config.train_input_reader.label_map_path = "/some/path" pipeline_config.eval_input_reader.add().label_map_path = "/some/path" pipeline_config.graph_rewriter.quantization.weight_bits = 1 pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") _write_config(pipeline_config, pipeline_config_path) # Override each of the parameters: configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) hparams = tf.contrib.training.HParams( **{ "model.ssd.num_classes": 2, "train_config.batch_size": 2, "train_input_config.label_map_path": "/some/other/path", "eval_config.num_visualizations": 2, "graph_rewriter_config.quantization.weight_bits": 2 }) configs = config_util.merge_external_params_with_configs(configs, hparams) # Ensure that the parameters have the overridden values: self.assertEqual(2, configs["model"].ssd.num_classes) self.assertEqual(2, configs["train_config"].batch_size) self.assertEqual("/some/other/path", configs["train_input_config"].label_map_path) self.assertEqual(2, 
configs["eval_config"].num_visualizations) self.assertEqual(2, configs["graph_rewriter_config"].quantization.weight_bits) def testNewBatchSize(self): """Tests that batch size is updated appropriately.""" original_batch_size = 2 hparams = tf.contrib.training.HParams(batch_size=16) pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.train_config.batch_size = original_batch_size _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) configs = config_util.merge_external_params_with_configs(configs, hparams) new_batch_size = configs["train_config"].batch_size self.assertEqual(16, new_batch_size) def testNewBatchSizeWithClipping(self): """Tests that batch size is clipped to 1 from below.""" original_batch_size = 2 hparams = tf.contrib.training.HParams(batch_size=0.5) pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.train_config.batch_size = original_batch_size _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) configs = config_util.merge_external_params_with_configs(configs, hparams) new_batch_size = configs["train_config"].batch_size self.assertEqual(1, new_batch_size) # Clipped to 1.0. def testOverwriteBatchSizeWithKeyValue(self): """Tests that batch size is overwritten based on key/value.""" pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.train_config.batch_size = 2 configs = self._create_and_load_test_configs(pipeline_config) hparams = tf.contrib.training.HParams(**{"train_config.batch_size": 10}) configs = config_util.merge_external_params_with_configs(configs, hparams) new_batch_size = configs["train_config"].batch_size self.assertEqual(10, new_batch_size) def testKeyValueOverrideBadKey(self): """Tests that overwriting with a bad key causes an exception.""" pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() configs = self._create_and_load_test_configs(pipeline_config) hparams = tf.contrib.training.HParams(**{"train_config.no_such_field": 10}) with self.assertRaises(ValueError): config_util.merge_external_params_with_configs(configs, hparams) def testOverwriteBatchSizeWithBadValueType(self): """Tests that overwriting with a bad valuye type causes an exception.""" pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.train_config.batch_size = 2 configs = self._create_and_load_test_configs(pipeline_config) # Type should be an integer, but we're passing a string "10". 
hparams = tf.contrib.training.HParams(**{"train_config.batch_size": "10"}) with self.assertRaises(TypeError): config_util.merge_external_params_with_configs(configs, hparams) def testNewMomentumOptimizerValue(self): """Tests that new momentum value is updated appropriately.""" original_momentum_value = 0.4 hparams = tf.contrib.training.HParams(momentum_optimizer_value=1.1) pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() optimizer_config = pipeline_config.train_config.optimizer.rms_prop_optimizer optimizer_config.momentum_optimizer_value = original_momentum_value _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) configs = config_util.merge_external_params_with_configs(configs, hparams) optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer new_momentum_value = optimizer_config.momentum_optimizer_value self.assertAlmostEqual(1.0, new_momentum_value) # Clipped to 1.0. def testNewClassificationLocalizationWeightRatio(self): """Tests that the loss weight ratio is updated appropriately.""" original_localization_weight = 0.1 original_classification_weight = 0.2 new_weight_ratio = 5.0 hparams = tf.contrib.training.HParams( classification_localization_weight_ratio=new_weight_ratio) pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.ssd.loss.localization_weight = ( original_localization_weight) pipeline_config.model.ssd.loss.classification_weight = ( original_classification_weight) _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) configs = config_util.merge_external_params_with_configs(configs, hparams) loss = configs["model"].ssd.loss self.assertAlmostEqual(1.0, loss.localization_weight) self.assertAlmostEqual(new_weight_ratio, loss.classification_weight) def testNewFocalLossParameters(self): """Tests that the loss weight ratio is updated appropriately.""" original_alpha = 1.0 original_gamma = 1.0 new_alpha = 0.3 new_gamma = 2.0 hparams = tf.contrib.training.HParams( focal_loss_alpha=new_alpha, focal_loss_gamma=new_gamma) pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() classification_loss = pipeline_config.model.ssd.loss.classification_loss classification_loss.weighted_sigmoid_focal.alpha = original_alpha classification_loss.weighted_sigmoid_focal.gamma = original_gamma _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) configs = config_util.merge_external_params_with_configs(configs, hparams) classification_loss = configs["model"].ssd.loss.classification_loss self.assertAlmostEqual(new_alpha, classification_loss.weighted_sigmoid_focal.alpha) self.assertAlmostEqual(new_gamma, classification_loss.weighted_sigmoid_focal.gamma) def testMergingKeywordArguments(self): """Tests that keyword arguments get merged as do hyperparameters.""" original_num_train_steps = 100 desired_num_train_steps = 10 pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.train_config.num_steps = original_num_train_steps _write_config(pipeline_config, pipeline_config_path) configs = 
config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"train_steps": desired_num_train_steps} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) train_steps = configs["train_config"].num_steps self.assertEqual(desired_num_train_steps, train_steps) def testGetNumberOfClasses(self): """Tests that number of classes can be retrieved.""" pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.model.faster_rcnn.num_classes = 20 _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) number_of_classes = config_util.get_number_of_classes(configs["model"]) self.assertEqual(20, number_of_classes) def testNewTrainInputPath(self): """Tests that train input path can be overwritten with single file.""" original_train_path = ["path/to/data"] new_train_path = "another/path/to/data" pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() reader_config = pipeline_config.train_input_reader.tf_record_input_reader reader_config.input_path.extend(original_train_path) _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"train_input_path": new_train_path} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) reader_config = configs["train_input_config"].tf_record_input_reader final_path = reader_config.input_path self.assertEqual([new_train_path], final_path) def testNewTrainInputPathList(self): """Tests that train input path can be overwritten with multiple files.""" original_train_path = ["path/to/data"] new_train_path = ["another/path/to/data", "yet/another/path/to/data"] pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() reader_config = pipeline_config.train_input_reader.tf_record_input_reader reader_config.input_path.extend(original_train_path) _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"train_input_path": new_train_path} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) reader_config = configs["train_input_config"].tf_record_input_reader final_path = reader_config.input_path self.assertEqual(new_train_path, final_path) def testNewLabelMapPath(self): """Tests that label map path can be overwritten in input readers.""" original_label_map_path = "path/to/original/label_map" new_label_map_path = "path//to/new/label_map" pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() train_input_reader = pipeline_config.train_input_reader train_input_reader.label_map_path = original_label_map_path eval_input_reader = pipeline_config.eval_input_reader.add() eval_input_reader.label_map_path = original_label_map_path _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"label_map_path": new_label_map_path} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) self.assertEqual(new_label_map_path, configs["train_input_config"].label_map_path) for 
eval_input_config in configs["eval_input_configs"]: self.assertEqual(new_label_map_path, eval_input_config.label_map_path) def testDontOverwriteEmptyLabelMapPath(self): """Tests that label map path will not by overwritten with empty string.""" original_label_map_path = "path/to/original/label_map" new_label_map_path = "" pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() train_input_reader = pipeline_config.train_input_reader train_input_reader.label_map_path = original_label_map_path eval_input_reader = pipeline_config.eval_input_reader.add() eval_input_reader.label_map_path = original_label_map_path _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"label_map_path": new_label_map_path} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) self.assertEqual(original_label_map_path, configs["train_input_config"].label_map_path) self.assertEqual(original_label_map_path, configs["eval_input_configs"][0].label_map_path) def testNewMaskType(self): """Tests that mask type can be overwritten in input readers.""" original_mask_type = input_reader_pb2.NUMERICAL_MASKS new_mask_type = input_reader_pb2.PNG_MASKS pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() train_input_reader = pipeline_config.train_input_reader train_input_reader.mask_type = original_mask_type eval_input_reader = pipeline_config.eval_input_reader.add() eval_input_reader.mask_type = original_mask_type _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"mask_type": new_mask_type} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) self.assertEqual(new_mask_type, configs["train_input_config"].mask_type) self.assertEqual(new_mask_type, configs["eval_input_configs"][0].mask_type) def testUseMovingAverageForEval(self): use_moving_averages_orig = False pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = use_moving_averages_orig _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"eval_with_moving_averages": True} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) self.assertEqual(True, configs["eval_config"].use_moving_averages) def testGetImageResizerConfig(self): """Tests that number of classes can be retrieved.""" model_config = model_pb2.DetectionModel() model_config.faster_rcnn.image_resizer.fixed_shape_resizer.height = 100 model_config.faster_rcnn.image_resizer.fixed_shape_resizer.width = 300 image_resizer_config = config_util.get_image_resizer_config(model_config) self.assertEqual(image_resizer_config.fixed_shape_resizer.height, 100) self.assertEqual(image_resizer_config.fixed_shape_resizer.width, 300) def testGetSpatialImageSizeFromFixedShapeResizerConfig(self): image_resizer_config = image_resizer_pb2.ImageResizer() image_resizer_config.fixed_shape_resizer.height = 100 image_resizer_config.fixed_shape_resizer.width = 200 image_shape = config_util.get_spatial_image_size(image_resizer_config) self.assertAllEqual(image_shape, [100, 200]) def 
testGetSpatialImageSizeFromAspectPreservingResizerConfig(self): image_resizer_config = image_resizer_pb2.ImageResizer() image_resizer_config.keep_aspect_ratio_resizer.min_dimension = 100 image_resizer_config.keep_aspect_ratio_resizer.max_dimension = 600 image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension = True image_shape = config_util.get_spatial_image_size(image_resizer_config) self.assertAllEqual(image_shape, [600, 600]) def testGetSpatialImageSizeFromAspectPreservingResizerDynamic(self): image_resizer_config = image_resizer_pb2.ImageResizer() image_resizer_config.keep_aspect_ratio_resizer.min_dimension = 100 image_resizer_config.keep_aspect_ratio_resizer.max_dimension = 600 image_shape = config_util.get_spatial_image_size(image_resizer_config) self.assertAllEqual(image_shape, [-1, -1]) def testEvalShuffle(self): """Tests that `eval_shuffle` keyword arguments are applied correctly.""" original_shuffle = True desired_shuffle = False pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_input_reader.add().shuffle = original_shuffle _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"eval_shuffle": desired_shuffle} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) self.assertEqual(desired_shuffle, configs["eval_input_configs"][0].shuffle) def testTrainShuffle(self): """Tests that `train_shuffle` keyword arguments are applied correctly.""" original_shuffle = True desired_shuffle = False pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.train_input_reader.shuffle = original_shuffle _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"train_shuffle": desired_shuffle} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) train_shuffle = configs["train_input_config"].shuffle self.assertEqual(desired_shuffle, train_shuffle) def testOverWriteRetainOriginalImages(self): """Tests that `train_shuffle` keyword arguments are applied correctly.""" original_retain_original_images = True desired_retain_original_images = False pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.retain_original_images = ( original_retain_original_images) _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = { "retain_original_images_in_eval": desired_retain_original_images } configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) retain_original_images = configs["eval_config"].retain_original_images self.assertEqual(desired_retain_original_images, retain_original_images) def testOverwriteAllEvalSampling(self): original_num_eval_examples = 1 new_num_eval_examples = 10 pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_input_reader.add().sample_1_of_n_examples = ( original_num_eval_examples) pipeline_config.eval_input_reader.add().sample_1_of_n_examples = ( original_num_eval_examples) 
_write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"sample_1_of_n_eval_examples": new_num_eval_examples} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) for eval_input_config in configs["eval_input_configs"]: self.assertEqual(new_num_eval_examples, eval_input_config.sample_1_of_n_examples) def testOverwriteAllEvalNumEpochs(self): original_num_epochs = 10 new_num_epochs = 1 pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_input_reader.add().num_epochs = original_num_epochs pipeline_config.eval_input_reader.add().num_epochs = original_num_epochs _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"eval_num_epochs": new_num_epochs} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) for eval_input_config in configs["eval_input_configs"]: self.assertEqual(new_num_epochs, eval_input_config.num_epochs) def testUpdateMaskTypeForAllInputConfigs(self): original_mask_type = input_reader_pb2.NUMERICAL_MASKS new_mask_type = input_reader_pb2.PNG_MASKS pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() train_config = pipeline_config.train_input_reader train_config.mask_type = original_mask_type eval_1 = pipeline_config.eval_input_reader.add() eval_1.mask_type = original_mask_type eval_1.name = "eval_1" eval_2 = pipeline_config.eval_input_reader.add() eval_2.mask_type = original_mask_type eval_2.name = "eval_2" _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"mask_type": new_mask_type} configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) self.assertEqual(configs["train_input_config"].mask_type, new_mask_type) for eval_input_config in configs["eval_input_configs"]: self.assertEqual(eval_input_config.mask_type, new_mask_type) def testErrorOverwritingMultipleInputConfig(self): original_shuffle = False new_shuffle = True pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() eval_1 = pipeline_config.eval_input_reader.add() eval_1.shuffle = original_shuffle eval_1.name = "eval_1" eval_2 = pipeline_config.eval_input_reader.add() eval_2.shuffle = original_shuffle eval_2.name = "eval_2" _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) override_dict = {"eval_shuffle": new_shuffle} with self.assertRaises(ValueError): configs = config_util.merge_external_params_with_configs( configs, kwargs_dict=override_dict) def testCheckAndParseInputConfigKey(self): pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_input_reader.add().name = "eval_1" pipeline_config.eval_input_reader.add().name = "eval_2" _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) specific_shuffle_update_key = "eval_input_configs:eval_2:shuffle" is_valid_input_config_key, key_name, input_name, field_name = ( 
config_util.check_and_parse_input_config_key( configs, specific_shuffle_update_key)) self.assertTrue(is_valid_input_config_key) self.assertEqual(key_name, "eval_input_configs") self.assertEqual(input_name, "eval_2") self.assertEqual(field_name, "shuffle") legacy_shuffle_update_key = "eval_shuffle" is_valid_input_config_key, key_name, input_name, field_name = ( config_util.check_and_parse_input_config_key(configs, legacy_shuffle_update_key)) self.assertTrue(is_valid_input_config_key) self.assertEqual(key_name, "eval_input_configs") self.assertEqual(input_name, None) self.assertEqual(field_name, "shuffle") non_input_config_update_key = "label_map_path" is_valid_input_config_key, key_name, input_name, field_name = ( config_util.check_and_parse_input_config_key( configs, non_input_config_update_key)) self.assertFalse(is_valid_input_config_key) self.assertEqual(key_name, None) self.assertEqual(input_name, None) self.assertEqual(field_name, "label_map_path") with self.assertRaisesRegexp(ValueError, "Invalid key format when overriding configs."): config_util.check_and_parse_input_config_key( configs, "train_input_config:shuffle") with self.assertRaisesRegexp( ValueError, "Invalid key_name when overriding input config."): config_util.check_and_parse_input_config_key( configs, "invalid_key_name:train_name:shuffle") with self.assertRaisesRegexp( ValueError, "Invalid input_name when overriding input config."): config_util.check_and_parse_input_config_key( configs, "eval_input_configs:unknown_eval_name:shuffle") with self.assertRaisesRegexp( ValueError, "Invalid field_name when overriding input config."): config_util.check_and_parse_input_config_key( configs, "eval_input_configs:eval_2:unknown_field_name") def testUpdateInputReaderConfigSuccess(self): original_shuffle = False new_shuffle = True pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.train_input_reader.shuffle = original_shuffle _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) config_util.update_input_reader_config( configs, key_name="train_input_config", input_name=None, field_name="shuffle", value=new_shuffle) self.assertEqual(configs["train_input_config"].shuffle, new_shuffle) config_util.update_input_reader_config( configs, key_name="train_input_config", input_name=None, field_name="shuffle", value=new_shuffle) self.assertEqual(configs["train_input_config"].shuffle, new_shuffle) def testUpdateInputReaderConfigErrors(self): pipeline_config_path = os.path.join(self.get_temp_dir(), "pipeline.config") pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_input_reader.add().name = "same_eval_name" pipeline_config.eval_input_reader.add().name = "same_eval_name" _write_config(pipeline_config, pipeline_config_path) configs = config_util.get_configs_from_pipeline_file(pipeline_config_path) with self.assertRaisesRegexp(ValueError, "Duplicate input name found when overriding."): config_util.update_input_reader_config( configs, key_name="eval_input_configs", input_name="same_eval_name", field_name="shuffle", value=False) with self.assertRaisesRegexp( ValueError, "Input name name_not_exist not found when overriding."): config_util.update_input_reader_config( configs, key_name="eval_input_configs", input_name="name_not_exist", field_name="shuffle", value=False) with self.assertRaisesRegexp(ValueError, "Unknown input config overriding."): 
config_util.update_input_reader_config( configs, key_name="eval_input_configs", input_name=None, field_name="shuffle", value=False) if __name__ == "__main__": tf.test.main()
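The tests above exercise `config_util.get_configs_from_pipeline_file` and `config_util.merge_external_params_with_configs`. Below is a minimal standalone sketch of that override pattern, assuming the TensorFlow Object Detection API (`object_detection` package) is installed; the `/tmp/pipeline.config` path is a placeholder, not a file shipped with the repository.

```python
import tensorflow as tf
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2
from object_detection.utils import config_util

pipeline_config_path = "/tmp/pipeline.config"  # hypothetical path

# Write a tiny pipeline config to disk, mirroring what the tests'
# _write_config helper does with a TrainEvalPipelineConfig proto.
pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
pipeline_config.train_config.num_steps = 100
with open(pipeline_config_path, "w") as f:
    f.write(text_format.MessageToString(pipeline_config))

# Load the config dict and override a field with a kwargs dict,
# exactly as in testMergingKeywordArguments above.
configs = config_util.get_configs_from_pipeline_file(pipeline_config_path)
configs = config_util.merge_external_params_with_configs(
    configs, kwargs_dict={"train_steps": 10})
print(configs["train_config"].num_steps)  # 10
```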
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules
modules
multihead_attention
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Dict, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from torch.nn import Parameter from common.fairseq import utils from common.fairseq.incremental_decoding_utils import with_incremental_state from .fairseq_dropout import FairseqDropout from .quant_noise import quant_noise class RotaryEmbedding(nn.Module): def __init__(self, dim): super().__init__() inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer('inv_freq', inv_freq) self.seq_len_cached = None self.cos_cached = None self.sin_cached = None def forward(self, x, seq_dim=0): seq_len = x.shape[seq_dim] if seq_len != self.seq_len_cached: self.seq_len_cached = seq_len t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) freqs = torch.einsum('i,j->ij', t, self.inv_freq) emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self.cos_cached = emb.cos()[:, None, :] self.sin_cached = emb.sin()[:, None, :] return self.cos_cached, self.sin_cached def rotate_half(x): x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:] return torch.cat((-x2, x1), dim=x1.ndim - 1) @torch.jit.script def apply_rotary_pos_emb(x, cos, sin): return (x * cos) + (rotate_half(x) * sin) @with_incremental_state class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. 
""" def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, rotary_embeddings=False, ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = FairseqDropout( dropout, module_name=self.__class__.__name__ ) self.rotary_embeddings = rotary_embeddings if self.rotary_embeddings: self.rotary_freq = RotaryEmbedding(embed_dim) else: self.rotary_freq = None self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) self.k_proj = quant_noise( nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.v_proj = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.q_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) self.out_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.reset_parameters() self.onnx_trace = False def prepare_for_onnx_export_(self): self.onnx_trace = True def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, ) -> Tuple[Tensor, Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. 
need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True is_tpu = query.device.type == "xla" tgt_len, bsz, embed_dim = query.size() src_len = tgt_len assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}" assert list(query.size()) == [tgt_len, bsz, embed_dim] if key is not None: src_len, key_bsz, _ = key.size() if not torch.jit.is_scripting(): assert key_bsz == bsz assert value is not None assert src_len, bsz == value.shape[:2] if ( not self.rotary_embeddings and not self.onnx_trace and not is_tpu # don't use PyTorch version on TPUs and incremental_state is None and not static_kv # A workaround for quantization to work. Otherwise JIT compilation # treats bias in linear module as method. and not torch.jit.is_scripting() ): assert key is not None and value is not None return F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout_module.p, self.out_proj.weight, self.out_proj.bias, self.training or self.dropout_module.apply_during_inference, key_padding_mask, need_weights, attn_mask, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: # seq_len, batch_size, dim q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) if self.rotary_freq is not None: cos, sin = self.rotary_freq(q) q = apply_rotary_pos_emb(q, cos, sin) k = apply_rotary_pos_emb(k, cos, sin) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) src_len = k.size(1) if "prev_value" in 
saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None assert k.size(1) == src_len # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as( key_padding_mask ), ], dim=1, ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) if self.onnx_trace: attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not is_tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v attn_weights_float = utils.softmax( attn_weights, dim=-1, onnx_trace=self.onnx_trace ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] if self.onnx_trace and attn.size(1) == 1: # when ONNX tracing a single decoder step (sequence length == 1) # the transpose is a no-op copy before view, thus unnecessary attn = attn.contiguous().view(tgt_len, bsz, embed_dim) else: attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, tgt_len, src_len ).transpose(1, 0) if 
not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights @staticmethod def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: if src_len > prev_key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask.float() elif key_padding_mask is not None: if src_len > key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = key_padding_mask.float() else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask @torch.jit.export def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor, ): """Reorder buffered internal state (for incremental generation).""" input_buffer = self._get_input_buffer(incremental_state) if input_buffer is not None: for k in input_buffer.keys(): input_buffer_k = input_buffer[k] if input_buffer_k is not None: if self.encoder_decoder_attention and input_buffer_k.size( 0 ) == new_order.size(0): break input_buffer[k] = input_buffer_k.index_select(0, new_order) incremental_state = self._set_input_buffer(incremental_state, input_buffer) return incremental_state def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights def upgrade_state_dict_named(self, state_dict, name): prefix = name + "." 
if name != "" else "" items_to_add = {} keys_to_remove = [] for k in state_dict.keys(): if k.endswith(prefix + "in_proj_weight"): # in_proj_weight used to be q + k + v with same dimensions dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim] items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim] items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :] keys_to_remove.append(k) k_bias = prefix + "in_proj_bias" if k_bias in state_dict.keys(): dim = int(state_dict[k].shape[0] / 3) items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim] items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][ dim : 2 * dim ] items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :] keys_to_remove.append(prefix + "in_proj_bias") for k in keys_to_remove: del state_dict[k] for key, value in items_to_add.items(): state_dict[key] = value
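The `MultiheadAttention` above can optionally rotate the query and key projections with `RotaryEmbedding` / `apply_rotary_pos_emb`. The sketch below is a self-contained re-statement of that math (depends only on `torch`), using the same `(seq_len, batch, dim)` layout as the module; the helper names here are local to the snippet.

```python
import torch

def rotate_half(x):
    # Split the last dim in half and rotate: (x1, x2) -> (-x2, x1).
    x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=-1)

def rotary_cos_sin(seq_len, dim):
    # Same frequency schedule as the RotaryEmbedding module above.
    inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
    t = torch.arange(seq_len).float()
    freqs = torch.einsum('i,j->ij', t, inv_freq)
    emb = torch.cat((freqs, freqs), dim=-1)
    # Shapes (seq_len, 1, dim) broadcast over the batch dimension.
    return emb.cos()[:, None, :], emb.sin()[:, None, :]

# Dummy query/key in (seq_len, batch, dim) layout, as used by the module.
seq_len, batch, dim = 8, 2, 16
q = torch.randn(seq_len, batch, dim)
k = torch.randn(seq_len, batch, dim)

cos, sin = rotary_cos_sin(seq_len, dim)
q_rot = (q * cos) + (rotate_half(q) * sin)   # apply_rotary_pos_emb(q, cos, sin)
k_rot = (k * cos) + (rotate_half(k) * sin)   # apply_rotary_pos_emb(k, cos, sin)

print(q_rot.shape, k_rot.shape)  # torch.Size([8, 2, 16]) torch.Size([8, 2, 16])
```

Because both q and k are rotated by position-dependent angles, their dot products depend only on relative offsets, which is why the module applies the rotation to both projections before computing attention scores.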
TensorFlow2/LanguageModeling/BERT
BERT
run_squad
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Run BERT on SQuAD 1.1 and SQuAD 2.0 in tf2.0.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import time import shutil import sys import subprocess from absl import app from absl import flags from absl import logging import tensorflow as tf import horovod.tensorflow as hvd import numpy as np from dllogger import Verbosity # Import BERT model libraries. from official.nlp import bert_models import common_flags import input_pipeline from official.modeling import model_training_utils import model_saving_utils from official.nlp import bert_modeling as modeling import optimization # word-piece tokenizer based squad_lib import squad_lib as squad_lib_wp # sentence-piece tokenizer based squad_lib import squad_lib_sp import tokenization import gpu_affinity import tf_trt from official.utils.misc import distribution_utils from official.utils.misc import keras_utils from official.utils.misc import tpu_lib import dllogger_class flags.DEFINE_enum( 'mode', 'train_and_predict', ['train_and_predict', 'train', 'predict', 'export_only', 'sm_predict', 'trt_predict'], 'One of {"train_and_predict", "train", "predict", "export_only", "sm_predict", "trt_predict"}. ' '`train_and_predict`: both train and predict to a json file. ' '`train`: only trains the model. ' 'trains the model and evaluates in the meantime. ' '`predict`: predict answers from the squad json file. ' '`export_only`: will take the latest checkpoint inside ' 'model_dir and export a `SavedModel`.' '`sm_predict`: will load SavedModel from savedmodel_dir and predict answers' '`trt_predict`: will load SavedModel from savedmodel_dir, convert and predict answers with TF-TRT') flags.DEFINE_string('train_data_path', '', 'Training data path with train tfrecords.') flags.DEFINE_string( 'input_meta_data_path', None, 'Path to file that contains meta data about input ' 'to be used for training and evaluation.') flags.DEFINE_string( "eval_script", None, "SQuAD evaluate.py file to compute f1 and exact_match E.g., evaluate-v1.1.py") # Model training specific flags. flags.DEFINE_integer('train_batch_size', 8, 'Total batch size for training.') # Predict processing related. flags.DEFINE_string('predict_file', None, 'Prediction data path with train tfrecords.') flags.DEFINE_string('vocab_file', None, 'The vocabulary file that the BERT model was trained on.') flags.DEFINE_bool( 'do_lower_case', True, 'Whether to lower case the input text. Should be True for uncased ' 'models and False for cased models.') flags.DEFINE_bool( 'verbose_logging', False, 'If true, all of the warnings related to data processing will be printed. 
' 'A number of warnings are expected for a normal SQuAD evaluation.') flags.DEFINE_integer('predict_batch_size', 8, 'Total batch size for prediction.') flags.DEFINE_integer( 'n_best_size', 20, 'The total number of n-best predictions to generate in the ' 'nbest_predictions.json output file.') flags.DEFINE_integer( 'max_answer_length', 30, 'The maximum length of an answer that can be generated. This is needed ' 'because the start and end predictions are not conditioned on one another.') flags.DEFINE_string( 'sp_model_file', None, 'The path to the sentence piece model. Used by sentence piece tokenizer ' 'employed by ALBERT.') flags.DEFINE_string( 'savedmodel_dir', None, 'The path of SavedModel for Savedmodel and TF-TRT prediction.') common_flags.define_common_bert_flags() FLAGS = flags.FLAGS MODEL_CLASSES = { 'bert': (modeling.BertConfig, squad_lib_wp, tokenization.FullTokenizer), 'albert': (modeling.AlbertConfig, squad_lib_sp, tokenization.FullSentencePieceTokenizer), } def squad_loss_fn(start_positions, end_positions, start_logits, end_logits, loss_factor=1.0): """Returns sparse categorical crossentropy for start/end logits.""" start_loss = tf.keras.backend.sparse_categorical_crossentropy( start_positions, start_logits, from_logits=True) end_loss = tf.keras.backend.sparse_categorical_crossentropy( end_positions, end_logits, from_logits=True) total_loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2 total_loss *= loss_factor return total_loss def get_loss_fn(loss_factor=1.0): """Gets a loss function for squad task.""" def _loss_fn(labels, model_outputs): start_positions = labels['start_positions'] end_positions = labels['end_positions'] start_logits, end_logits = model_outputs return squad_loss_fn( start_positions, end_positions, start_logits, end_logits, loss_factor=loss_factor) return _loss_fn def get_raw_results(predictions): """Converts multi-replica predictions to RawResult.""" squad_lib = MODEL_CLASSES[FLAGS.model_type][1] for unique_ids, start_logits, end_logits in zip(predictions['unique_ids'], predictions['start_logits'], predictions['end_logits']): for values in zip(unique_ids.numpy(), start_logits.numpy(), end_logits.numpy()): yield squad_lib.RawResult( unique_id=values[0], start_logits=values[1].tolist(), end_logits=values[2].tolist()) def get_dataset_fn(input_file_pattern, max_seq_length, global_batch_size, is_training, use_horovod): """Gets a closure to create a dataset..""" def _dataset_fn(ctx=None): """Returns tf.data.Dataset for distributed BERT pretraining.""" batch_size = ctx.get_per_replica_batch_size( global_batch_size) if ctx else global_batch_size dataset = input_pipeline.create_squad_dataset( input_file_pattern, max_seq_length, batch_size, is_training=is_training, input_pipeline_context=ctx, use_horovod=use_horovod) return dataset return _dataset_fn def predict_squad_customized(strategy, input_meta_data, bert_config, predict_tfrecord_path, num_steps): """Make predictions using a Bert-based squad model.""" predict_dataset_fn = get_dataset_fn( predict_tfrecord_path, input_meta_data['max_seq_length'], FLAGS.predict_batch_size, is_training=False, use_horovod=False) if strategy: predict_iterator = iter( strategy.experimental_distribute_datasets_from_function( predict_dataset_fn)) else: predict_iterator = iter(predict_dataset_fn()) if FLAGS.mode == 'trt_predict': squad_model = tf_trt.TFTRTModel(FLAGS.savedmodel_dir, "amp" if FLAGS.use_fp16 else "fp32") elif FLAGS.mode == 'sm_predict': squad_model = tf_trt.SavedModel(FLAGS.savedmodel_dir, "amp" if 
FLAGS.use_fp16 else "fp32") else: with distribution_utils.get_strategy_scope(strategy): squad_model, _ = bert_models.squad_model( bert_config, input_meta_data['max_seq_length'], float_type=tf.float16 if FLAGS.use_fp16 else tf.float32) if FLAGS.init_checkpoint: checkpoint = tf.train.Checkpoint(model=squad_model) checkpoint.restore(FLAGS.init_checkpoint).expect_partial() checkpoint_path = tf.train.latest_checkpoint(FLAGS.model_dir) logging.info('Restoring checkpoints from %s', checkpoint_path) checkpoint = tf.train.Checkpoint(model=squad_model) checkpoint.restore(checkpoint_path).expect_partial() @tf.function def predict_step(iterator): """Predicts on distributed devices.""" def _replicated_step(inputs): """Replicated prediction calculation.""" x, _ = inputs unique_ids = x.pop('unique_ids') if FLAGS.benchmark: t0 = tf.timestamp() unique_ids = t0 start_logits, end_logits = squad_model(x, training=False) return dict( unique_ids=unique_ids, start_logits=start_logits, end_logits=end_logits) def tuple_fun(x): return (x,) if strategy: outputs = strategy.experimental_run_v2( _replicated_step, args=(next(iterator),)) map_func = strategy.experimental_local_results else: outputs = _replicated_step(next(iterator),) map_func = tuple_fun return tf.nest.map_structure(map_func, outputs) all_results = [] time_list = [] eval_start_time = time.time() elapsed_secs = 0 for _ in range(num_steps): predictions = predict_step(predict_iterator) if FLAGS.benchmark: # transfer tensor to CPU for synchronization t0 = predictions['unique_ids'][0] start_logits = predictions['start_logits'][0] start_logits.numpy() elapsed_secs = time.time() - t0.numpy() # Removing first 4 (arbitrary) number of startup iterations from perf evaluations if _ > 3: time_list.append(elapsed_secs) continue for result in get_raw_results(predictions): all_results.append(result) if len(all_results) % 100 == 0: logging.info('Made predictions for %d records.', len(all_results)) eval_time_elapsed = time.time() - eval_start_time logging.info("-----------------------------") logging.info("Summary Inference Statistics") logging.info("Batch size = %d", FLAGS.predict_batch_size) logging.info("Sequence Length = %d", input_meta_data['max_seq_length']) logging.info("Precision = %s", "fp16" if FLAGS.use_fp16 else "fp32") logging.info("Total Inference Time = %0.2f for Sentences = %d", eval_time_elapsed, num_steps * FLAGS.predict_batch_size) if FLAGS.benchmark: eval_time_wo_overhead = sum(time_list) time_list.sort() num_sentences = (num_steps - 4) * FLAGS.predict_batch_size avg = np.mean(time_list) cf_50 = max(time_list[:int(len(time_list) * 0.50)]) cf_90 = max(time_list[:int(len(time_list) * 0.90)]) cf_95 = max(time_list[:int(len(time_list) * 0.95)]) cf_99 = max(time_list[:int(len(time_list) * 0.99)]) cf_100 = max(time_list[:int(len(time_list) * 1)]) ss_sentences_per_second = num_sentences * 1.0 / eval_time_wo_overhead logging.info("Total Inference Time W/O Overhead = %0.2f for Sequences = %d", eval_time_wo_overhead, (num_steps - 4) * FLAGS.predict_batch_size) logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000) logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000) logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000) logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000) logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000) logging.info("Latency Average (ms) = %0.2f", avg * 1000) logging.info("Throughput Average (sequences/sec) = %0.2f", ss_sentences_per_second) dllogging = 
input_meta_data['dllogging'] dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT) logging.info("-----------------------------") return all_results def train_squad(strategy, input_meta_data, custom_callbacks=None, run_eagerly=False): """Run bert squad training.""" if strategy: logging.info('Training using customized training loop with distribution' ' strategy.') # Enables XLA in Session Config. Should not be set for TPU. keras_utils.set_config_v2(FLAGS.enable_xla) use_float16 = common_flags.use_float16() if use_float16: tf.keras.mixed_precision.experimental.set_policy('mixed_float16') bert_config = MODEL_CLASSES[FLAGS.model_type][0].from_json_file( FLAGS.bert_config_file) epochs = FLAGS.num_train_epochs num_train_examples = input_meta_data['train_data_size'] max_seq_length = input_meta_data['max_seq_length'] global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps if FLAGS.use_horovod: global_batch_size *= hvd.size() steps_per_epoch = int(num_train_examples / global_batch_size) warmup_steps = int(epochs * num_train_examples * 0.1 / global_batch_size) train_input_fn = get_dataset_fn( FLAGS.train_data_path, max_seq_length, FLAGS.train_batch_size, is_training=True, use_horovod=FLAGS.use_horovod) if FLAGS.benchmark: steps_per_epoch = 800 epochs = 1 def _get_squad_model(): """Get Squad model and optimizer.""" squad_model, core_model = bert_models.squad_model( bert_config, max_seq_length, float_type=tf.float16 if FLAGS.use_fp16 else tf.float32, hub_module_url=FLAGS.hub_module_url) learning_rate = FLAGS.learning_rate * hvd.size() if FLAGS.use_horovod else FLAGS.learning_rate squad_model.optimizer = optimization.create_optimizer( learning_rate, steps_per_epoch * epochs, warmup_steps, FLAGS.optimizer_type) if FLAGS.use_fp16: squad_model.optimizer = tf.keras.mixed_precision.LossScaleOptimizer(squad_model.optimizer, dynamic=True) return squad_model, core_model # The original BERT model does not scale the loss by # 1/num_replicas_in_sync. It could be an accident. So, in order to use # the same hyper parameter, we do the same thing here by keeping each # replica loss as it is. loss_fn = get_loss_fn( loss_factor=1.0 / strategy.num_replicas_in_sync if FLAGS.scale_loss and strategy else 1.0) params = {'dllogging' : input_meta_data['dllogging'], 'FLAGS' : FLAGS} model_training_utils.run_customized_training_loop( strategy=strategy, model_fn=_get_squad_model, loss_fn=loss_fn, model_dir=FLAGS.model_dir, steps_per_epoch=steps_per_epoch, num_accumulative_step=FLAGS.num_accumulation_steps, steps_per_loop=FLAGS.steps_per_loop, epochs=epochs, train_input_fn=train_input_fn, init_checkpoint=FLAGS.init_checkpoint, hvd=hvd if FLAGS.use_horovod else None, run_eagerly=run_eagerly, custom_callbacks=custom_callbacks, params=params) def predict_squad(strategy, input_meta_data): """Makes predictions for a squad dataset.""" keras_utils.set_config_v2(FLAGS.enable_xla) config_cls, squad_lib, tokenizer_cls = MODEL_CLASSES[FLAGS.model_type] bert_config = config_cls.from_json_file(FLAGS.bert_config_file) if tokenizer_cls == tokenization.FullTokenizer: tokenizer = tokenizer_cls( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) else: assert tokenizer_cls == tokenization.FullSentencePieceTokenizer tokenizer = tokenizer_cls(sp_model_file=FLAGS.sp_model_file) doc_stride = input_meta_data['doc_stride'] max_query_length = input_meta_data['max_query_length'] # Whether data should be in Ver 2.0 format. 
version_2_with_negative = input_meta_data.get('version_2_with_negative', False) eval_examples = squad_lib.read_squad_examples( input_file=FLAGS.predict_file, is_training=False, version_2_with_negative=version_2_with_negative) eval_writer = squad_lib.FeatureWriter( filename=os.path.join(FLAGS.model_dir, 'eval.tf_record'), is_training=False) eval_features = [] def _append_feature(feature, is_padding): if not is_padding: eval_features.append(feature) eval_writer.process_feature(feature) # TPU requires a fixed batch size for all batches, therefore the number # of examples must be a multiple of the batch size, or else examples # will get dropped. So we pad with fake examples which are ignored # later on. kwargs = dict( examples=eval_examples, tokenizer=tokenizer, max_seq_length=input_meta_data['max_seq_length'], doc_stride=doc_stride, max_query_length=max_query_length, is_training=False, output_fn=_append_feature, batch_size=FLAGS.predict_batch_size) # squad_lib_sp requires one more argument 'do_lower_case'. if squad_lib == squad_lib_sp: kwargs['do_lower_case'] = FLAGS.do_lower_case dataset_size = squad_lib.convert_examples_to_features(**kwargs) eval_writer.close() logging.info('***** Running predictions *****') logging.info(' Num orig examples = %d', len(eval_examples)) logging.info(' Num split examples = %d', len(eval_features)) logging.info(' Batch size = %d', FLAGS.predict_batch_size) num_steps = int(dataset_size / FLAGS.predict_batch_size) if FLAGS.benchmark and num_steps > 1000: num_steps = 1000 all_results = predict_squad_customized(strategy, input_meta_data, bert_config, eval_writer.filename, num_steps) if FLAGS.benchmark: return output_prediction_file = os.path.join(FLAGS.model_dir, 'predictions.json') output_nbest_file = os.path.join(FLAGS.model_dir, 'nbest_predictions.json') output_null_log_odds_file = os.path.join(FLAGS.model_dir, 'null_odds.json') squad_lib.write_predictions( eval_examples, eval_features, all_results, FLAGS.n_best_size, FLAGS.max_answer_length, FLAGS.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose=FLAGS.verbose_logging) if FLAGS.eval_script: eval_out = subprocess.check_output([sys.executable, FLAGS.eval_script, FLAGS.predict_file, output_prediction_file]) scores = str(eval_out).strip() exact_match = float(scores.split(":")[1].split(",")[0]) if version_2_with_negative: f1 = float(scores.split(":")[2].split(",")[0]) else: f1 = float(scores.split(":")[2].split("}")[0]) dllogging = input_meta_data['dllogging'] dllogging.logger.log(step=(), data={"f1": f1}, verbosity=Verbosity.DEFAULT) dllogging.logger.log(step=(), data={"exact_match": exact_match}, verbosity=Verbosity.DEFAULT) print(str(eval_out)) def export_squad(model_export_path, input_meta_data): """Exports a trained model as a `SavedModel` for inference. Args: model_export_path: a string specifying the path to the SavedModel directory. input_meta_data: dictionary containing meta data about input and model. Raises: Export path is not specified, got an empty string or None. 
""" if not model_export_path: raise ValueError('Export path is not specified: %s' % model_export_path) bert_config = MODEL_CLASSES[FLAGS.model_type][0].from_json_file( FLAGS.bert_config_file) squad_model, _ = bert_models.squad_model( bert_config, input_meta_data['max_seq_length'], float_type=tf.float32) model_saving_utils.export_bert_model( model_export_path + '/savedmodel', model=squad_model, checkpoint_dir=FLAGS.model_dir) model_name = FLAGS.triton_model_name model_folder = model_export_path + "/triton_models/" + model_name version_folder = model_folder + "/" + str(FLAGS.triton_model_version) final_model_folder = version_folder + "/model.savedmodel" if not os.path.exists(version_folder): os.makedirs(version_folder) if (not os.path.exists(final_model_folder)): os.rename(model_export_path + '/savedmodel', final_model_folder) print("Model saved to dir", final_model_folder) else: if (FLAGS.triton_model_overwrite): shutil.rmtree(final_model_folder) os.rename(model_export_path + '/savedmodel', final_model_folder) print("WARNING: Existing model was overwritten. Model dir: {}".format(final_model_folder)) else: print("ERROR: Could not save Triton model. Folder already exists. Use '--triton_model_overwrite=True' if you would like to overwrite an existing model. Model dir: {}".format(final_model_folder)) return config_filename = os.path.join(model_folder, "config.pbtxt") if (os.path.exists(config_filename) and not FLAGS.triton_model_overwrite): print("ERROR: Could not save Triton model config. Config file already exists. Use '--triton_model_overwrite=True' if you would like to overwrite an existing model config. Model config: {}".format(config_filename)) return config_template = r""" name: "{model_name}" platform: "tensorflow_savedmodel" max_batch_size: {max_batch_size} input [ {{ name: "input_mask" data_type: TYPE_INT32 dims: {seq_length} }}, {{ name: "input_type_ids" data_type: TYPE_INT32 dims: {seq_length} }}, {{ name: "input_word_ids" data_type: TYPE_INT32 dims: {seq_length} }} ] output [ {{ name: "end_positions" data_type: TYPE_FP32 dims: {seq_length} }}, {{ name: "start_positions" data_type: TYPE_FP32 dims: {seq_length} }} ] {dynamic_batching} instance_group [ {{ count: {engine_count} kind: KIND_GPU gpus: [{gpu_list}] }} ]""" batching_str = "" max_batch_size = FLAGS.triton_max_batch_size if (FLAGS.triton_dyn_batching_delay > 0): # Use only full and half full batches pref_batch_size = [int(max_batch_size / 2.0), max_batch_size] batching_str = r""" dynamic_batching {{ preferred_batch_size: [{0}] max_queue_delay_microseconds: {1} }}""".format(", ".join([str(x) for x in pref_batch_size]), int(FLAGS.triton_dyn_batching_delay * 1000.0)) config_values = { "model_name": model_name, "max_batch_size": max_batch_size, "seq_length": input_meta_data['max_seq_length'], "dynamic_batching": batching_str, "gpu_list": ", ".join([x.name.split(":")[-1] for x in tf.config.list_physical_devices('GPU')]), "engine_count": FLAGS.triton_engine_count } with open(model_folder + "/config.pbtxt", "w") as file: final_config_str = config_template.format_map(config_values) file.write(final_config_str) def main(_): # Users should always run this script under TF 2.x # The container haven't changed version number yet, skip the check. 
assert tf.version.VERSION.startswith('2.') with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader: input_meta_data = json.loads(reader.read().decode('utf-8')) if FLAGS.mode == 'export_only': export_squad(FLAGS.model_export_path, input_meta_data) return gpus = tf.config.experimental.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) strategy = distribution_utils.get_distribution_strategy( distribution_strategy=FLAGS.distribution_strategy, num_gpus=FLAGS.num_gpus, tpu_address=FLAGS.tpu) if FLAGS.use_horovod: if strategy: raise ValueError('Should not run horovod with distribution strategy') hvd.init() if gpus: tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU') gpu_affinity.set_affinity(hvd.local_rank()) if FLAGS.use_fp16: policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") tf.keras.mixed_precision.experimental.set_policy(policy) os.makedirs(FLAGS.model_dir, exist_ok=True) dllogging = dllogger_class.dllogger_class(FLAGS.dllog_path) input_meta_data['dllogging'] = dllogging if FLAGS.mode in ('train', 'train_and_predict'): train_squad(strategy, input_meta_data) if FLAGS.mode in ('predict', 'sm_predict', 'trt_predict', 'train_and_predict') and (not FLAGS.use_horovod or hvd.rank() == 0): predict_squad(strategy, input_meta_data) if __name__ == '__main__': flags.mark_flag_as_required('bert_config_file') flags.mark_flag_as_required('model_dir') app.run(main)
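The SQuAD head above is trained with `squad_loss_fn`, which averages sparse categorical cross-entropy over the start- and end-position logits. A minimal standalone sketch of that computation on random logits follows (dummy shapes only, not tied to the actual input pipeline or checkpoints):

```python
import tensorflow as tf

batch, seq_len = 4, 384
start_logits = tf.random.normal([batch, seq_len])
end_logits = tf.random.normal([batch, seq_len])
start_positions = tf.constant([5, 17, 0, 100])
end_positions = tf.constant([9, 20, 0, 120])

# Same computation as squad_loss_fn above with loss_factor=1.0.
start_loss = tf.keras.backend.sparse_categorical_crossentropy(
    start_positions, start_logits, from_logits=True)
end_loss = tf.keras.backend.sparse_categorical_crossentropy(
    end_positions, end_logits, from_logits=True)
total_loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2
print(float(total_loss))
```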
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/runtime
runtime
arguments
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES # SPDX-License-Identifier: MIT import argparse import pathlib from se3_transformer.data_loading import QM9DataModule from se3_transformer.model import SE3TransformerPooled from se3_transformer.runtime.utils import str2bool PARSER = argparse.ArgumentParser(description='SE(3)-Transformer') paths = PARSER.add_argument_group('Paths') paths.add_argument('--data_dir', type=pathlib.Path, default=pathlib.Path('./data'), help='Directory where the data is located or should be downloaded') paths.add_argument('--log_dir', type=pathlib.Path, default=pathlib.Path('./results'), help='Directory where the results logs should be saved') paths.add_argument('--dllogger_name', type=str, default='dllogger_results.json', help='Name for the resulting DLLogger JSON file') paths.add_argument('--save_ckpt_path', type=pathlib.Path, default=None, help='File where the checkpoint should be saved') paths.add_argument('--load_ckpt_path', type=pathlib.Path, default=None, help='File of the checkpoint to be loaded') optimizer = PARSER.add_argument_group('Optimizer') optimizer.add_argument('--optimizer', choices=['adam', 'sgd', 'lamb'], default='adam') optimizer.add_argument('--learning_rate', '--lr', dest='learning_rate', type=float, default=0.002) optimizer.add_argument('--min_learning_rate', '--min_lr', dest='min_learning_rate', type=float, default=None) optimizer.add_argument('--momentum', type=float, default=0.9) optimizer.add_argument('--weight_decay', type=float, default=0.1) PARSER.add_argument('--epochs', type=int, default=100, help='Number of training epochs') PARSER.add_argument('--batch_size', type=int, default=240, help='Batch size') PARSER.add_argument('--seed', type=int, default=None, help='Set a seed globally') PARSER.add_argument('--num_workers', type=int, default=8, help='Number of dataloading workers') PARSER.add_argument('--amp', type=str2bool, nargs='?', const=True, default=False, help='Use Automatic Mixed Precision') PARSER.add_argument('--gradient_clip', type=float, default=None, help='Clipping of the gradient norms') PARSER.add_argument('--accumulate_grad_batches', type=int, default=1, help='Gradient accumulation') PARSER.add_argument('--ckpt_interval', type=int, default=-1, help='Save a checkpoint every N epochs') PARSER.add_argument('--eval_interval', dest='eval_interval', type=int, default=20, 
help='Do an evaluation round every N epochs') PARSER.add_argument('--silent', type=str2bool, nargs='?', const=True, default=False, help='Minimize stdout output') PARSER.add_argument('--wandb', type=str2bool, nargs='?', const=True, default=False, help='Enable W&B logging') PARSER.add_argument('--benchmark', type=str2bool, nargs='?', const=True, default=False, help='Benchmark mode') QM9DataModule.add_argparse_args(PARSER) SE3TransformerPooled.add_argparse_args(PARSER)
PyTorch/Classification/ConvNets/resnet50v1.5/training/AMP
AMP
DGX1V_resnet50_AMP_90E
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGX1V /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
PyTorch/Detection/Efficientdet/effdet/layers
layers
activations_me
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2019-2022 Ross Wightman # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn as nn from torch.nn import functional as F @torch.jit.script def swish_jit_fwd(x): return x.mul(torch.sigmoid(x)) @torch.jit.script def swish_jit_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) return grad_output * (x_sigmoid * (1 + x * (1 - x_sigmoid))) class SwishJitAutoFn(torch.autograd.Function): """ torch.jit.script optimised Swish w/ memory-efficient checkpoint Inspired by conversation btw Jeremy Howard & Adam Pazske https://twitter.com/jeremyphoward/status/1188251041835315200 """ @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return swish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return swish_jit_bwd(x, grad_output) def swish_me(x, inplace=False): return SwishJitAutoFn.apply(x) class SwishMe(nn.Module): def __init__(self, inplace: bool = False): super(SwishMe, self).__init__() def forward(self, x): return SwishJitAutoFn.apply(x) @torch.jit.script def mish_jit_fwd(x): return x.mul(torch.tanh(F.softplus(x))) @torch.jit.script def mish_jit_bwd(x, grad_output): x_sigmoid = torch.sigmoid(x) x_tanh_sp = F.softplus(x).tanh() return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp)) class MishJitAutoFn(torch.autograd.Function): """ Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681 A memory efficient, jit scripted variant of Mish """ @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return mish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return mish_jit_bwd(x, grad_output) def mish_me(x, inplace=False): return MishJitAutoFn.apply(x) class MishMe(nn.Module): def __init__(self, inplace: bool = False): super(MishMe, self).__init__() def forward(self, x): return MishJitAutoFn.apply(x) @torch.jit.script def hard_sigmoid_jit_fwd(x, inplace: bool = False): return (x + 3).clamp(min=0, max=6).div(6.) @torch.jit.script def hard_sigmoid_jit_bwd(x, grad_output): m = torch.ones_like(x) * ((x >= -3.) & (x <= 3.)) / 6. 
return grad_output * m class HardSigmoidJitAutoFn(torch.autograd.Function): @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_sigmoid_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_sigmoid_jit_bwd(x, grad_output) def hard_sigmoid_me(x, inplace: bool = False): return HardSigmoidJitAutoFn.apply(x) class HardSigmoidMe(nn.Module): def __init__(self, inplace: bool = False): super(HardSigmoidMe, self).__init__() def forward(self, x): return HardSigmoidJitAutoFn.apply(x) @torch.jit.script def hard_swish_jit_fwd(x): return x * (x + 3).clamp(min=0, max=6).div(6.) @torch.jit.script def hard_swish_jit_bwd(x, grad_output): m = torch.ones_like(x) * (x >= 3.) m = torch.where((x >= -3.) & (x <= 3.), x / 3. + .5, m) return grad_output * m class HardSwishJitAutoFn(torch.autograd.Function): """A memory efficient, jit-scripted HardSwish activation""" @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_swish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_swish_jit_bwd(x, grad_output) def hard_swish_me(x, inplace=False): return HardSwishJitAutoFn.apply(x) class HardSwishMe(nn.Module): def __init__(self, inplace: bool = False): super(HardSwishMe, self).__init__() def forward(self, x): return HardSwishJitAutoFn.apply(x) @torch.jit.script def hard_mish_jit_fwd(x): return 0.5 * x * (x + 2).clamp(min=0, max=2) @torch.jit.script def hard_mish_jit_bwd(x, grad_output): m = torch.ones_like(x) * (x >= -2.) m = torch.where((x >= -2.) & (x <= 0.), x + 1., m) return grad_output * m class HardMishJitAutoFn(torch.autograd.Function): """ A memory efficient, jit scripted variant of Hard Mish Experimental, based on notes by Mish author Diganta Misra at https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md """ @staticmethod def forward(ctx, x): ctx.save_for_backward(x) return hard_mish_jit_fwd(x) @staticmethod def backward(ctx, grad_output): x = ctx.saved_tensors[0] return hard_mish_jit_bwd(x, grad_output) def hard_mish_me(x, inplace: bool = False): return HardMishJitAutoFn.apply(x) class HardMishMe(nn.Module): def __init__(self, inplace: bool = False): super(HardMishMe, self).__init__() def forward(self, x): return HardMishJitAutoFn.apply(x)
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules
modules
quant_noise
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn def quant_noise(module, p, block_size): """ Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks """ # if no quantization noise, don't register hook if p <= 0: return module # supported modules assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)) # test whether module.weight has the right sizes wrt block_size is_conv = module.weight.ndim == 4 # 2D matrix if not is_conv: assert ( module.weight.size(1) % block_size == 0 ), "Input features must be a multiple of block sizes" # 4D matrix else: # 1x1 convolutions if module.kernel_size == (1, 1): assert ( module.in_channels % block_size == 0 ), "Input channels must be a multiple of block sizes" # regular convolutions else: k = module.kernel_size[0] * module.kernel_size[1] assert k % block_size == 0, "Kernel size must be a multiple of block size" def _forward_pre_hook(mod, input): # no noise for evaluation if mod.training: if not is_conv: # gather weight and sizes weight = mod.weight in_features = weight.size(1) out_features = weight.size(0) # split weight matrix into blocks and randomly drop selected blocks mask = torch.zeros( in_features // block_size * out_features, device=weight.device ) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_features) else: # gather weight and sizes weight = mod.weight in_channels = mod.in_channels out_channels = mod.out_channels # split weight matrix into blocks and randomly drop selected blocks if mod.kernel_size == (1, 1): mask = torch.zeros( int(in_channels // block_size * out_channels), device=weight.device, ) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels) else: mask = torch.zeros( weight.size(0), weight.size(1), device=weight.device ) mask.bernoulli_(p) mask = ( mask.unsqueeze(2) .unsqueeze(3) .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1]) ) # scale weights and apply mask mask = mask.to( torch.bool ) # x.bool() is not currently supported in TorchScript s = 1 / (1 - p) mod.weight.data = s * weight.masked_fill(mask, 0) 
module.register_forward_pre_hook(_forward_pre_hook) return module
TensorFlow/Classification/ConvNets/resnet50v1.5/training
training
DGX2_RN50_AMP_90E
#!/bin/bash # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. WORKSPACE=${1:-"/workspace/rn50v15_tf"} DATA_DIR=${2:-"/data"} OTHER=${@:3} if [[ ! -z "${BIND_TO_SOCKET}" ]]; then BIND_TO_SOCKET="--bind-to socket" fi mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 16 python3 main.py --arch=resnet50 \ --mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \ --batch_size=256 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \ --lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=3.0517578125e-05 \ --amp --static_loss_scale 128 \ --data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \ --results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
PyTorch/Classification/ConvNets/efficientnet/inference/FP32
FP32
DGXA100_efficientnet-widese-b4_FP32
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 1 --workspace ${1:-./} --raport-file raport_1.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 2 --workspace ${1:-./} --raport-file raport_2.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 4 --workspace ${1:-./} --raport-file raport_4.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 8 --workspace ${1:-./} --raport-file raport_8.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 16 --workspace ${1:-./} --raport-file raport_16.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 32 --workspace ${1:-./} --raport-file raport_32.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 64 --workspace ${1:-./} --raport-file raport_64.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 128 --workspace ${1:-./} --raport-file raport_128.json
TensorFlow/Classification/ConvNets/triton
triton
run_offline_performance_test_on_triton
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ], where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ] `--shape IMAGE:3,224,224`. """ import argparse import csv import os import sys from pathlib import Path from typing import Dict, List, Optional # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.report import save_results, show_results, sort_results from .deployment_toolkit.warmup import warmup def calculate_average_latency(r): avg_sum_fields = [ "Client Send", "Network+Server Send/Recv", "Server Queue", "Server Compute", "Server Compute Input", "Server Compute Infer", "Server Compute Output", "Client Recv", ] avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields]) return avg_latency def update_performance_data(results: List, batch_size: int, performance_partial_file: str): row: Dict = {"batch_size": batch_size} with open(performance_partial_file, "r") as csvfile: reader = csv.DictReader(csvfile) for r in reader: avg_latency = calculate_average_latency(r) row = {**row, **r, "avg latency": avg_latency} results.append(row) def _parse_batch_sizes(batch_sizes: str): batches = batch_sizes.split(sep=",") return list(map(lambda x: int(x.strip()), batches)) def offline_performance( model_name: str, batch_sizes: List[int], result_path: str, input_shapes: Optional[List[str]] = None, profiling_data: str = "random", triton_instances: int = 1, server_url: str = "localhost", measurement_window: int = 10000, shared_memory: bool = False ): print("\n") print(f"==== Static batching analysis start ====") print("\n") input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else "" results: List[Dict] = list() for batch_size in batch_sizes: print(f"Running performance tests for batch size: {batch_size}") performance_partial_file = f"triton_performance_partial_{batch_size}.csv" exec_args = f"""-max-threads {triton_instances} \ -m {model_name} \ -x 1 \ -c {triton_instances} \ -t {triton_instances} \ -p {measurement_window} \ -v \ -i http \ -u {server_url}:8000 \ -b {batch_size} \ -f {performance_partial_file} \ --input-data {profiling_data} {input_shapes}""" if shared_memory: exec_args += " --shared-memory=cuda" result = os.system(f"perf_client {exec_args}") if result != 0: print(f"Failed running performance tests. 
Perf client failed with exit code {result}") sys.exit(1) update_performance_data(results, batch_size, performance_partial_file) os.remove(performance_partial_file) results = sort_results(results=results) save_results(filename=result_path, data=results) show_results(results=results) print("Performance results for static batching stored in: {0}".format(result_path)) print("\n") print(f"==== Analysis done ====") print("\n") def main(): parser = argparse.ArgumentParser() parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test") parser.add_argument( "--input-data", type=str, required=False, default="random", help="Input data to perform profiling." ) parser.add_argument( "--input-shape", action="append", required=False, help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.", ) parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.") parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.") parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances") parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server") parser.add_argument( "--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000 ) parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true", default=False) args = parser.parse_args() warmup( server_url=args.server_url, model_name=args.model_name, batch_sizes=_parse_batch_sizes(args.batch_sizes), triton_instances=args.triton_instances, profiling_data=args.input_data, input_shapes=args.input_shape, measurement_window=args.measurement_window, shared_memory=args.shared_memory ) offline_performance( server_url=args.server_url, model_name=args.model_name, batch_sizes=_parse_batch_sizes(args.batch_sizes), triton_instances=args.triton_instances, profiling_data=args.input_data, input_shapes=args.input_shape, result_path=args.result_path, measurement_window=args.measurement_window, shared_memory=args.shared_memory ) if __name__ == "__main__": main()
TensorFlow/Detection/SSD/models/research/slim/nets
nets
inception_v3_test
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for nets.inception_v1.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from nets import inception slim = tf.contrib.slim class InceptionV3Test(tf.test.TestCase): def testBuildClassificationNetwork(self): batch_size = 5 height, width = 299, 299 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) logits, end_points = inception.inception_v3(inputs, num_classes) self.assertTrue(logits.op.name.startswith( 'InceptionV3/Logits/SpatialSqueeze')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) self.assertTrue('Predictions' in end_points) self.assertListEqual(end_points['Predictions'].get_shape().as_list(), [batch_size, num_classes]) def testBuildPreLogitsNetwork(self): batch_size = 5 height, width = 299, 299 num_classes = None inputs = tf.random_uniform((batch_size, height, width, 3)) net, end_points = inception.inception_v3(inputs, num_classes) self.assertTrue(net.op.name.startswith('InceptionV3/Logits/AvgPool')) self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 2048]) self.assertFalse('Logits' in end_points) self.assertFalse('Predictions' in end_points) def testBuildBaseNetwork(self): batch_size = 5 height, width = 299, 299 inputs = tf.random_uniform((batch_size, height, width, 3)) final_endpoint, end_points = inception.inception_v3_base(inputs) self.assertTrue(final_endpoint.op.name.startswith( 'InceptionV3/Mixed_7c')) self.assertListEqual(final_endpoint.get_shape().as_list(), [batch_size, 8, 8, 2048]) expected_endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'] self.assertItemsEqual(end_points.keys(), expected_endpoints) def testBuildOnlyUptoFinalEndpoint(self): batch_size = 5 height, width = 299, 299 endpoints = ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3', 'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c'] for index, endpoint in enumerate(endpoints): with tf.Graph().as_default(): inputs = tf.random_uniform((batch_size, height, width, 3)) out_tensor, end_points = inception.inception_v3_base( inputs, final_endpoint=endpoint) self.assertTrue(out_tensor.op.name.startswith( 'InceptionV3/' + endpoint)) self.assertItemsEqual(endpoints[:index+1], end_points.keys()) def testBuildAndCheckAllEndPointsUptoMixed7c(self): batch_size = 5 height, width = 299, 299 inputs = tf.random_uniform((batch_size, height, width, 3)) _, end_points = inception.inception_v3_base( inputs, 
final_endpoint='Mixed_7c') endpoints_shapes = {'Conv2d_1a_3x3': [batch_size, 149, 149, 32], 'Conv2d_2a_3x3': [batch_size, 147, 147, 32], 'Conv2d_2b_3x3': [batch_size, 147, 147, 64], 'MaxPool_3a_3x3': [batch_size, 73, 73, 64], 'Conv2d_3b_1x1': [batch_size, 73, 73, 80], 'Conv2d_4a_3x3': [batch_size, 71, 71, 192], 'MaxPool_5a_3x3': [batch_size, 35, 35, 192], 'Mixed_5b': [batch_size, 35, 35, 256], 'Mixed_5c': [batch_size, 35, 35, 288], 'Mixed_5d': [batch_size, 35, 35, 288], 'Mixed_6a': [batch_size, 17, 17, 768], 'Mixed_6b': [batch_size, 17, 17, 768], 'Mixed_6c': [batch_size, 17, 17, 768], 'Mixed_6d': [batch_size, 17, 17, 768], 'Mixed_6e': [batch_size, 17, 17, 768], 'Mixed_7a': [batch_size, 8, 8, 1280], 'Mixed_7b': [batch_size, 8, 8, 2048], 'Mixed_7c': [batch_size, 8, 8, 2048]} self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) for endpoint_name in endpoints_shapes: expected_shape = endpoints_shapes[endpoint_name] self.assertTrue(endpoint_name in end_points) self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape) def testModelHasExpectedNumberOfParameters(self): batch_size = 5 height, width = 299, 299 inputs = tf.random_uniform((batch_size, height, width, 3)) with slim.arg_scope(inception.inception_v3_arg_scope()): inception.inception_v3_base(inputs) total_params, _ = slim.model_analyzer.analyze_vars( slim.get_model_variables()) self.assertAlmostEqual(21802784, total_params) def testBuildEndPoints(self): batch_size = 5 height, width = 299, 299 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) _, end_points = inception.inception_v3(inputs, num_classes) self.assertTrue('Logits' in end_points) logits = end_points['Logits'] self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) self.assertTrue('AuxLogits' in end_points) aux_logits = end_points['AuxLogits'] self.assertListEqual(aux_logits.get_shape().as_list(), [batch_size, num_classes]) self.assertTrue('Mixed_7c' in end_points) pre_pool = end_points['Mixed_7c'] self.assertListEqual(pre_pool.get_shape().as_list(), [batch_size, 8, 8, 2048]) self.assertTrue('PreLogits' in end_points) pre_logits = end_points['PreLogits'] self.assertListEqual(pre_logits.get_shape().as_list(), [batch_size, 1, 1, 2048]) def testBuildEndPointsWithDepthMultiplierLessThanOne(self): batch_size = 5 height, width = 299, 299 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) _, end_points = inception.inception_v3(inputs, num_classes) endpoint_keys = [key for key in end_points.keys() if key.startswith('Mixed') or key.startswith('Conv')] _, end_points_with_multiplier = inception.inception_v3( inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=0.5) for key in endpoint_keys: original_depth = end_points[key].get_shape().as_list()[3] new_depth = end_points_with_multiplier[key].get_shape().as_list()[3] self.assertEqual(0.5 * original_depth, new_depth) def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self): batch_size = 5 height, width = 299, 299 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) _, end_points = inception.inception_v3(inputs, num_classes) endpoint_keys = [key for key in end_points.keys() if key.startswith('Mixed') or key.startswith('Conv')] _, end_points_with_multiplier = inception.inception_v3( inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0) for key in endpoint_keys: original_depth = end_points[key].get_shape().as_list()[3] new_depth = 
end_points_with_multiplier[key].get_shape().as_list()[3] self.assertEqual(2.0 * original_depth, new_depth) def testRaiseValueErrorWithInvalidDepthMultiplier(self): batch_size = 5 height, width = 299, 299 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) with self.assertRaises(ValueError): _ = inception.inception_v3(inputs, num_classes, depth_multiplier=-0.1) with self.assertRaises(ValueError): _ = inception.inception_v3(inputs, num_classes, depth_multiplier=0.0) def testHalfSizeImages(self): batch_size = 5 height, width = 150, 150 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) logits, end_points = inception.inception_v3(inputs, num_classes) self.assertTrue(logits.op.name.startswith('InceptionV3/Logits')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) pre_pool = end_points['Mixed_7c'] self.assertListEqual(pre_pool.get_shape().as_list(), [batch_size, 3, 3, 2048]) def testUnknownImageShape(self): tf.reset_default_graph() batch_size = 2 height, width = 299, 299 num_classes = 1000 input_np = np.random.uniform(0, 1, (batch_size, height, width, 3)) with self.test_session() as sess: inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3)) logits, end_points = inception.inception_v3(inputs, num_classes) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) pre_pool = end_points['Mixed_7c'] feed_dict = {inputs: input_np} tf.global_variables_initializer().run() pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 8, 2048]) def testGlobalPoolUnknownImageShape(self): tf.reset_default_graph() batch_size = 1 height, width = 330, 400 num_classes = 1000 input_np = np.random.uniform(0, 1, (batch_size, height, width, 3)) with self.test_session() as sess: inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3)) logits, end_points = inception.inception_v3(inputs, num_classes, global_pool=True) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) pre_pool = end_points['Mixed_7c'] feed_dict = {inputs: input_np} tf.global_variables_initializer().run() pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 11, 2048]) def testUnknowBatchSize(self): batch_size = 1 height, width = 299, 299 num_classes = 1000 inputs = tf.placeholder(tf.float32, (None, height, width, 3)) logits, _ = inception.inception_v3(inputs, num_classes) self.assertTrue(logits.op.name.startswith('InceptionV3/Logits')) self.assertListEqual(logits.get_shape().as_list(), [None, num_classes]) images = tf.random_uniform((batch_size, height, width, 3)) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) def testEvaluation(self): batch_size = 2 height, width = 299, 299 num_classes = 1000 eval_inputs = tf.random_uniform((batch_size, height, width, 3)) logits, _ = inception.inception_v3(eval_inputs, num_classes, is_training=False) predictions = tf.argmax(logits, 1) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) def testTrainEvalWithReuse(self): train_batch_size = 5 eval_batch_size = 2 height, width = 150, 150 num_classes = 1000 train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) 
inception.inception_v3(train_inputs, num_classes) eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) logits, _ = inception.inception_v3(eval_inputs, num_classes, is_training=False, reuse=True) predictions = tf.argmax(logits, 1) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) def testLogitsNotSqueezed(self): num_classes = 25 images = tf.random_uniform([1, 299, 299, 3]) logits, _ = inception.inception_v3(images, num_classes=num_classes, spatial_squeeze=False) with self.test_session() as sess: tf.global_variables_initializer().run() logits_out = sess.run(logits) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) def testNoBatchNormScaleByDefault(self): height, width = 299, 299 num_classes = 1000 inputs = tf.placeholder(tf.float32, (1, height, width, 3)) with slim.arg_scope(inception.inception_v3_arg_scope()): inception.inception_v3(inputs, num_classes, is_training=False) self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), []) def testBatchNormScale(self): height, width = 299, 299 num_classes = 1000 inputs = tf.placeholder(tf.float32, (1, height, width, 3)) with slim.arg_scope( inception.inception_v3_arg_scope(batch_norm_scale=True)): inception.inception_v3(inputs, num_classes, is_training=False) gamma_names = set( v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$')) self.assertGreater(len(gamma_names), 0) for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'): self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names) if __name__ == '__main__': tf.test.main()
PyTorch/Recommendation/DLRM/tests
tests
test_fspecs
#!/bin/bash NAMES=${1:-'*.yaml'} COMMON_OPTS="--embedding_type=joint_sparse --interaction_op=dot" bash test_with_opts.sh "${NAMES}" "${COMMON_OPTS}" # # usage: # docker build . -t nvidia_dlrm_pyt # docker run --security-opt seccomp=unconfined --runtime=nvidia -it --rm --ipc=host -v ${PWD}/data:/data nvidia_dlrm_pyt bash # cd tests # bash test_fspecs.sh
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling/rpn
rpn
__init__
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # from .rpn import build_rpn
PyTorch/SpeechSynthesis/Tacotron2
Tacotron2
requirements
matplotlib numpy inflect librosa scipy resampy==0.3.1 git+https://github.com/NVIDIA/dllogger@v0.1.0#egg=dllogger
PyTorch/Recommendation/NCF
NCF
LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright 2018 NVIDIA Corporation Copyright 2018 The MLPerf Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
TensorFlow2/Detection/Efficientdet/dataset
dataset
__init__
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # This library is mostly based on tensorflow object detection API # https://github.com/tensorflow/models/blob/master/research/object_detection/dataset_tools/create_coco_tf_record.py
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/model/layers
layers
pooling
# Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES # SPDX-License-Identifier: MIT from typing import Dict, Literal import torch.nn as nn from dgl import DGLGraph from dgl.nn.pytorch import AvgPooling, MaxPooling from torch import Tensor class GPooling(nn.Module): """ Graph max/average pooling on a given feature type. The average can be taken for any feature type, and equivariance will be maintained. The maximum can only be taken for invariant features (type 0). If you want max-pooling for type > 0 features, look into Vector Neurons. """ def __init__(self, feat_type: int = 0, pool: Literal['max', 'avg'] = 'max'): """ :param feat_type: Feature type to pool :param pool: Type of pooling: max or avg """ super().__init__() assert pool in ['max', 'avg'], f'Unknown pooling: {pool}' assert feat_type == 0 or pool == 'avg', 'Max pooling on type > 0 features will break equivariance' self.feat_type = feat_type self.pool = MaxPooling() if pool == 'max' else AvgPooling() def forward(self, features: Dict[str, Tensor], graph: DGLGraph, **kwargs) -> Tensor: pooled = self.pool(graph, features[str(self.feat_type)]) return pooled.squeeze(dim=-1)