relative_path | section | filename | text
---|---|---|---|
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf | tf | deploy_sparse | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel (tgrel@nvidia.com)
import json
import os
import tensorflow as tf
from tensorflow.python.saved_model import save_options
from nn.embedding import DualEmbeddingGroup
class Model(tf.keras.Model):
def __init__(self, cardinalities, output_dim, memory_threshold):
super().__init__()
self.cardinalities = cardinalities
self.output_dim = output_dim
self.embedding = DualEmbeddingGroup(cardinalities, output_dim, memory_threshold, use_mde_embeddings=False)
@tf.function
def call(self, x):
x = self.embedding(x)
x = tf.reshape(x, [-1, len(self.cardinalities) * self.output_dim])
return x
_sparse_model_config_template = r"""name: "{model_name}"
platform: "tensorflow_savedmodel"
max_batch_size:{max_batch_size}
optimization {{
execution_accelerators {{
gpu_execution_accelerator {{
name: "gpu_io"
}}
}}
}}
version_policy: {{
specific:{{versions: {version}}}
}},
instance_group [
{{
count: {engine_count_per_device}
kind : KIND_GPU
gpus : [0]
}}
]"""
def save_triton_config(
dst_path, model_name, version, max_batch_size, engine_count_per_device
):
config_str = _sparse_model_config_template.format(
model_name=model_name,
max_batch_size=max_batch_size,
version=version,
engine_count_per_device=engine_count_per_device,
)
with open(dst_path, "w") as f:
f.write(config_str)
print("Wrote sparse model Triton config to:", dst_path)
def deploy_sparse(
src,
dst,
model_name,
max_batch_size,
engine_count_per_device,
memory_threshold_gb,
num_gpus=1,
version="1",
**kwargs,
):
print("deploy sparse dst: ", dst)
with open(os.path.join(src, "config.json")) as f:
src_config = json.load(f)
model = Model(cardinalities=src_config["categorical_cardinalities"],
output_dim=src_config['embedding_dim'][0],
memory_threshold=memory_threshold_gb)
x = tf.zeros(shape=(65536, len(src_config["categorical_cardinalities"])), dtype=tf.int32)
_ = model(x)
model.embedding.restore_checkpoint(src)
options = save_options.SaveOptions(experimental_variable_policy=save_options.VariablePolicy.SAVE_VARIABLE_DEVICES)
savedmodel_dir = os.path.join(dst, '1', 'model.savedmodel')
os.makedirs(savedmodel_dir)
tf.keras.models.save_model(model=model, filepath=savedmodel_dir, overwrite=True, options=options)
save_triton_config(
dst_path=os.path.join(dst, "config.pbtxt"),
model_name=model_name,
version=version,
max_batch_size=max_batch_size,
engine_count_per_device=engine_count_per_device,
)
return len(src_config["categorical_cardinalities"])
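# --- Hedged example (not part of the original module): a minimal sketch of how
# deploy_sparse might be invoked directly. All paths and sizes below are
# hypothetical; src must point at a real DLRM/DCNv2 checkpoint directory that
# contains config.json and the embedding weights. ---
if __name__ == "__main__":
    deploy_sparse(
        src="/checkpoints/dlrm",                 # hypothetical checkpoint directory
        dst="/model_repository/dlrm_sparse",     # Triton model-repository entry to create
        model_name="dlrm_sparse",
        max_batch_size=65536,
        engine_count_per_device=1,
        memory_threshold_gb=16,
    )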
|
PyTorch/LanguageModeling/Transformer-XL/pytorch/scripts/tests | tests | train_bench | #!/bin/bash
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
REPO_DIR=${REPO_DIR:-"/workspace/transformer-xl/pytorch/"}
REFERENCE_FILE=$REPO_DIR/scripts/tests/reference_training_throughput
MATH=$1
if [[ ${MATH} != "fp16" && ${MATH} != "fp32" ]]; then
echo "Unsupported option for MATH, use either 'fp16' or 'fp32'"
exit 1
fi
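# Example invocation (hedged; run from inside the training container with
# REPO_DIR pointing at the Transformer-XL PyTorch sources):
#   bash scripts/tests/train_bench.sh fp16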
PERF_TOLERANCE=0.9
GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader |uniq)
echo 'GPU_NAME:' "${GPU_NAME}"
GPU_COUNT=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader |wc -l)
echo 'GPU_COUNT:' "${GPU_COUNT}"
if (( GPU_COUNT == 16 )); then
SYSTEM=dgx2
else
SYSTEM=dgx1
fi
REFERENCE_PERF=$(grep "${MATH},${GPU_COUNT},${GPU_NAME}" \
${REFERENCE_FILE} | \cut -f 4 -d ',')
if [ -z "${REFERENCE_PERF}" ]; then
echo "WARNING: COULD NOT FIND REFERENCE PERFORMANCE FOR EXECUTED CONFIG"
TARGET_PERF=''
else
PERF_THRESHOLD=$(awk 'BEGIN {print ('"${REFERENCE_PERF}"' * '"${PERF_TOLERANCE}"')}')
TARGET_PERF='--target_throughput '${PERF_THRESHOLD}
fi
cd $REPO_DIR
bash run_wt103_base.sh train "${GPU_COUNT}" \
--config ${SYSTEM}_${GPU_COUNT}gpu_${MATH} \
--max_step $((512 / GPU_COUNT)) \
--debug \
--no_eval \
--log_interval 1 \
${TARGET_PERF}
|
PyTorch/LanguageModeling/BART/bart/tokenization | tokenization | tokenization_utils_fast | # coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for fast tokenizers (provided by HuggingFace's tokenizers library).
For slow (python) tokenizers see tokenization_utils.py
"""
import logging
import os
from collections import defaultdict
from typing import Any, Dict, List, Optional, Tuple, Union
from tokenizers import Encoding as EncodingFast
from tokenizers.decoders import Decoder as DecoderFast
from tokenizers.implementations import BaseTokenizer as BaseTokenizerFast
from utils.file_utils import add_end_docstrings
from bart.tokenization.tokenization_utils_base import (
INIT_TOKENIZER_DOCSTRING,
AddedToken,
BatchEncoding,
PaddingStrategy,
PreTokenizedInput,
PreTokenizedInputPair,
PreTrainedTokenizerBase,
TextInput,
TextInputPair,
TruncationStrategy,
)
logger = logging.getLogger(__name__)
@add_end_docstrings(
INIT_TOKENIZER_DOCSTRING,
"""
.. automethod:: __call__
""",
)
class PreTrainedTokenizerFast(PreTrainedTokenizerBase):
"""
Base class for all fast tokenizers (wrapping HuggingFace tokenizers library).
Inherits from :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerBase`.
Handles all the shared methods for tokenization and special tokens, as well as methods for
downloading/caching/loading pretrained tokenizers, as well as adding tokens to the vocabulary.
This class also contains the added tokens in a unified way on top of all tokenizers so we don't
have to handle the specific vocabulary augmentation methods of the various underlying
dictionary structures (BPE, sentencepiece...).
"""
def __init__(self, tokenizer: BaseTokenizerFast, **kwargs):
if not isinstance(tokenizer, BaseTokenizerFast):
raise ValueError(
"Tokenizer should be an instance of a BaseTokenizer " "provided by HuggingFace tokenizers library."
)
self._tokenizer: BaseTokenizerFast = tokenizer
# We call this after having initialized the backend tokenizer because we update it.
super().__init__(**kwargs)
@property
def is_fast(self) -> bool:
return True
@property
def vocab_size(self) -> int:
"""
:obj:`int`: Size of the base vocabulary (without the added tokens).
"""
return self._tokenizer.get_vocab_size(with_added_tokens=False)
def get_vocab(self) -> Dict[str, int]:
"""
Returns the vocabulary as a dictionary of token to index.
:obj:`tokenizer.get_vocab()[token]` is equivalent to :obj:`tokenizer.convert_tokens_to_ids(token)` when
:obj:`token` is in the vocab.
Returns:
:obj:`Dict[str, int]`: The vocabulary.
"""
return self._tokenizer.get_vocab(with_added_tokens=True)
def get_added_vocab(self) -> Dict[str, int]:
"""
Returns the added tokens in the vocabulary as a dictionary of token to index.
Returns:
:obj:`Dict[str, int]`: The added tokens.
"""
base_vocab = self._tokenizer.get_vocab(with_added_tokens=False)
full_vocab = self._tokenizer.get_vocab(with_added_tokens=True)
added_vocab = dict((tok, index) for tok, index in full_vocab.items() if tok not in base_vocab)
return added_vocab
def __len__(self) -> int:
"""
Size of the full vocabulary with the added tokens.
"""
return self._tokenizer.get_vocab_size(with_added_tokens=True)
@property
def backend_tokenizer(self) -> BaseTokenizerFast:
"""
:obj:`tokenizers.implementations.BaseTokenizer`: The Rust tokenizer used as a backend.
"""
return self._tokenizer
@property
def decoder(self) -> DecoderFast:
"""
:obj:`tokenizers.decoders.Decoder`: The Rust decoder for this tokenizer.
"""
return self._tokenizer._tokenizer.decoder
def _convert_encoding(
self,
encoding: EncodingFast,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
) -> Dict[str, Any]:
""" Convert the encoding representation (from low-level HuggingFace tokenizer output) to a python Dict.
Overflowing tokens are converted to additional examples (like batches) so the output values of
the dict are lists (overflows) of lists (tokens).
Output shape: (overflows, sequence length)
"""
if return_token_type_ids is None:
return_token_type_ids = "token_type_ids" in self.model_input_names
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if return_overflowing_tokens and encoding.overflowing is not None:
encodings = [encoding] + encoding.overflowing
else:
encodings = [encoding]
encoding_dict = defaultdict(list)
for e in encodings:
encoding_dict["input_ids"].append(e.ids)
if return_token_type_ids:
encoding_dict["token_type_ids"].append(e.type_ids)
if return_attention_mask:
encoding_dict["attention_mask"].append(e.attention_mask)
if return_special_tokens_mask:
encoding_dict["special_tokens_mask"].append(e.special_tokens_mask)
if return_offsets_mapping:
encoding_dict["offset_mapping"].append(e.offsets)
if return_length:
encoding_dict["length"].append(len(e.ids))
return encoding_dict
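# --- Hedged illustration (not part of the original class): the EncodingFast
# objects consumed above come from the HuggingFace `tokenizers` backend. A
# minimal standalone sketch of what such an encoding exposes:
#
#     from tokenizers import Tokenizer
#     from tokenizers.models import WordLevel
#     from tokenizers.pre_tokenizers import Whitespace
#
#     backend = Tokenizer(WordLevel({"[UNK]": 0, "hello": 1, "world": 2}, unk_token="[UNK]"))
#     backend.pre_tokenizer = Whitespace()
#     enc = backend.encode("hello world")
#     print(enc.ids, enc.tokens, enc.attention_mask, enc.offsets)
#     # e.g. [1, 2] ['hello', 'world'] [1, 1] [(0, 5), (6, 11)]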
def convert_tokens_to_ids(self, tokens: Union[str, List[str]]) -> Union[int, List[int]]:
"""
Converts a token string (or a sequence of tokens) to a single integer id (or a sequence of ids), using the
vocabulary.
Args:
tokens (:obj:`str` or :obj:`List[str]`): One or several token(s) to convert to token id(s).
Returns:
:obj:`int` or :obj:`List[int]`: The token id or list of token ids.
"""
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
def _convert_token_to_id_with_added_voc(self, token: str) -> int:
index = self._tokenizer.token_to_id(token)
if index is None:
return self.unk_token_id
return index
def _convert_id_to_token(self, index: int) -> Optional[str]:
return self._tokenizer.id_to_token(int(index))
def _add_tokens(self, new_tokens: List[Union[str, AddedToken]], special_tokens=False) -> int:
if special_tokens:
return self._tokenizer.add_special_tokens(new_tokens)
return self._tokenizer.add_tokens(new_tokens)
def num_special_tokens_to_add(self, pair: bool = False) -> int:
"""
Returns the number of added tokens when encoding a sequence with special tokens.
.. note::
This encodes a dummy input and checks the number of added tokens, and is therefore not efficient. Do not
put this inside your training loop.
Args:
pair (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence.
Returns:
:obj:`int`: Number of special tokens added to sequences.
"""
return self._tokenizer.num_special_tokens_to_add(pair)
def convert_ids_to_tokens(
self, ids: Union[int, List[int]], skip_special_tokens: bool = False
) -> Union[str, List[str]]:
"""
Converts a single index or a sequence of indices to a token or a sequence of tokens, using the vocabulary
and added tokens.
Args:
ids (:obj:`int` or :obj:`List[int]`):
The token id (or token ids) to convert to tokens.
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to remove special tokens in the decoding.
Returns:
:obj:`str` or :obj:`List[str]`: The decoded token(s).
"""
if isinstance(ids, int):
return self._tokenizer.id_to_token(ids)
tokens = []
for index in ids:
index = int(index)
if skip_special_tokens and index in self.all_special_ids:
continue
tokens.append(self._tokenizer.id_to_token(index))
return tokens
def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False) -> List[str]:
"""
Converts a string into a sequence of tokens, using the backend Rust tokenizer.
Args:
text (:obj:`str`):
The sequence to be encoded.
pair (:obj:`str`, `optional`):
A second sequence to be encoded with the first.
add_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to add the special tokens associated with the corresponding model.
Returns:
:obj:`List[str]`: The list of tokens.
"""
return self._tokenizer.encode(text, pair, add_special_tokens=add_special_tokens).tokens
def set_truncation_and_padding(
self,
padding_strategy: PaddingStrategy,
truncation_strategy: TruncationStrategy,
max_length: int,
stride: int,
pad_to_multiple_of: Optional[int],
):
"""
Define the truncation and the padding strategies for fast tokenizers (provided by HuggingFace tokenizers
library) and restore the tokenizer settings afterwards.
The provided tokenizer has no padding / truncation strategy before the managed section. If your tokenizer set a
padding / truncation strategy before, then it will be reset to no padding / truncation when exiting the managed
section.
Args:
padding_strategy (:class:`~transformers.tokenization_utils_base.PaddingStrategy`):
The kind of padding that will be applied to the input
truncation_strategy (:class:`~transformers.tokenization_utils_base.TruncationStrategy`):
The kind of truncation that will be applied to the input
max_length (:obj:`int`):
The maximum size of a sequence.
stride (:obj:`int`):
The stride to use when handling overflow.
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
"""
# Set truncation and padding on the backend tokenizer
if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE:
self._tokenizer.enable_truncation(max_length, stride=stride, strategy=truncation_strategy.value)
else:
self._tokenizer.no_truncation()
if padding_strategy != PaddingStrategy.DO_NOT_PAD:
self._tokenizer.enable_padding(
length=max_length if padding_strategy == PaddingStrategy.MAX_LENGTH else None,
direction=self.padding_side,
pad_id=self.pad_token_id,
pad_type_id=self.pad_token_type_id,
pad_token=self.pad_token,
pad_to_multiple_of=pad_to_multiple_of,
)
else:
self._tokenizer.no_padding()
def _batch_encode_plus(
self,
batch_text_or_text_pairs: Union[
List[TextInput], List[TextInputPair], List[PreTokenizedInput], List[PreTokenizedInputPair]
],
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[str] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
if not isinstance(batch_text_or_text_pairs, list):
raise ValueError(
"batch_text_or_text_pairs has to be a list (got {})".format(type(batch_text_or_text_pairs))
)
if kwargs:
raise ValueError(f"Keyword arguments {kwargs} not recognized.")
# Set the truncation and padding strategy and restore the initial configuration
self.set_truncation_and_padding(
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
)
# Avoid thread overhead if only one example.
if len(batch_text_or_text_pairs) == 1:
if isinstance(batch_text_or_text_pairs[0], tuple):
# We got a Tuple with a pair of sequences
encodings = self._tokenizer.encode(
*batch_text_or_text_pairs[0],
add_special_tokens=add_special_tokens,
is_pretokenized=is_pretokenized,
)
else:
# We got a single sequence
encodings = self._tokenizer.encode(
batch_text_or_text_pairs[0],
add_special_tokens=add_special_tokens,
is_pretokenized=is_pretokenized,
)
encodings = [encodings]
else:
encodings = self._tokenizer.encode_batch(
batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=is_pretokenized
)
# Convert encoding to dict
# `Tokens` has type: List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]]
# with nested dimensions corresponding to batch, overflows, sequence length
tokens = [
self._convert_encoding(
encoding=encoding,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
)
for encoding in encodings
]
# Convert the output to have dict[list] from list[dict]
sanitized = {}
for key in tokens[0].keys():
# To List[List[List[int]]] of shape (batch, overflows, sequence length)
stack = [e for item in tokens for e in item[key]]
sanitized[key] = stack
# If returning overflowing tokens, we need to return a mapping
# from the batch idx to the original sample
if return_overflowing_tokens:
overflow_to_sample_mapping = []
for i, enc in enumerate(tokens):
overflow_to_sample_mapping += [i] * len(enc["input_ids"])
sanitized["overflow_to_sample_mapping"] = overflow_to_sample_mapping
return BatchEncoding(sanitized, encodings, tensor_type=return_tensors)
def _encode_plus(
self,
text: Union[TextInput, PreTokenizedInput],
text_pair: Optional[Union[TextInput, PreTokenizedInput]] = None,
add_special_tokens: bool = True,
padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
max_length: Optional[int] = None,
stride: int = 0,
is_pretokenized: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[bool] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
**kwargs
) -> BatchEncoding:
batched_input = [(text, text_pair)] if text_pair else [text]
batched_output = self._batch_encode_plus(
batched_input,
is_pretokenized=is_pretokenized,
add_special_tokens=add_special_tokens,
padding_strategy=padding_strategy,
truncation_strategy=truncation_strategy,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
**kwargs,
)
# If return_tensors is None, we can remove the leading batch axis.
# Overflowing tokens are returned as a batch of outputs, so we keep the batch axis in that case.
if return_tensors is None and not return_overflowing_tokens:
batched_output = BatchEncoding(
{
key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
for key, value in batched_output.items()
},
batched_output.encodings,
)
return batched_output
def decode(
self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True
) -> str:
"""
Converts a sequence of ids into a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
Args:
token_ids (:obj:`List[int]`):
List of tokenized input ids. Can be obtained using the ``__call__`` method.
skip_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to clean up the tokenization spaces.
Returns:
:obj:`str`: The decoded sentence.
"""
text = self._tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)
if clean_up_tokenization_spaces:
clean_text = self.clean_up_tokenization(text)
return clean_text
else:
return text
def save_vocabulary(self, save_directory: str) -> Tuple[str]:
"""
Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
and special token mappings.
.. warning::
Please use :meth:`~transformers.PreTrainedTokenizer.save_pretrained` to save the full tokenizer state if
you want to reload it using the :meth:`~transformers.PreTrainedTokenizer.from_pretrained` class method.
Args:
save_directory (:obj:`str`): The path to a directory where the tokenizer will be saved.
Returns:
A tuple of :obj:`str`: The files saved.
"""
if os.path.isdir(save_directory):
files = self._tokenizer.save_model(save_directory)
else:
folder, file = os.path.split(os.path.abspath(save_directory))
files = self._tokenizer.save_model(folder, name=file)
return tuple(files) |
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | faster_rcnn_inception_resnet_v2_atrous_pets | # Faster R-CNN with Inception Resnet v2, Atrous version;
# Configured for Oxford-IIIT Pets Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
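# One common way to fill these fields in (hedged example; the target path is
# hypothetical) is a simple in-place substitution:
#   sed -i "s|PATH_TO_BE_CONFIGURED|/data/pets|g" faster_rcnn_inception_resnet_v2_atrous_pets.config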
model {
faster_rcnn {
num_classes: 37
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_inception_resnet_v2'
first_stage_features_stride: 8
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 8
width_stride: 8
}
}
first_stage_atrous_rate: 2
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 17
maxpool_kernel_size: 1
maxpool_stride: 1
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 100
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
}
}
train_config: {
batch_size: 1
optimizer {
momentum_optimizer: {
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 0.0003
schedule {
step: 900000
learning_rate: .00003
}
schedule {
step: 1200000
learning_rate: .000003
}
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
from_detection_checkpoint: true
load_all_detection_checkpoint_vars: true
# Note: The below line limits the training process to 200K steps, which we
# empirically found to be sufficient to train the pets dataset. This
# effectively bypasses the learning rate schedule (the learning rate will
# never decay). Remove the below line to train indefinitely.
num_steps: 200000
data_augmentation_options {
random_horizontal_flip {
}
}
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/pet_faces_train.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
num_examples: 1101
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/pet_faces_val.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cpu | cpu | vision | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#pragma once
#include <torch/extension.h>
at::Tensor ROIAlign_forward_cpu(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio);
at::Tensor nms_cpu(const at::Tensor& dets,
const at::Tensor& scores,
const float threshold);
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/test | test | CMakeLists | ##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
function(add_unit_test test_file)
get_filename_component(test_name "${test_file}" NAME_WE)
add_executable(${test_name} ${test_file} UnitTest.cpp)
target_link_libraries(${test_name} tt2i)
add_test(NAME ${test_name}
COMMAND "${CMAKE_CURRENT_BINARY_DIR}/${test_name}"
WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}")
endfunction()
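# Example (hedged): add_unit_test() can also be called directly on a single,
# hypothetical test source, e.g. add_unit_test(layerData_test.cpp); below, all
# *_test.cpp files are picked up automatically via file(GLOB).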
include_directories(
../extra
../trt/plugins/taco2AttentionPlugin/
../trt/plugins/taco2DenoiseTransformPlugin/
../trt/plugins/taco2LSTMCellPlugin/
../trt/plugins/taco2ModulationRemovalPlugin/
../trt/plugins/taco2PrenetPlugin/
../trt/plugins/taco2ProjectionPlugin/
../trt/plugins/common/
../trt/
../trt/util
../trt/tacotron2
../trt/waveglow
../trt/denoiser
../trt/common
)
file(GLOB tests *_test.cpp)
foreach (file ${tests})
add_unit_test(${file})
endforeach()
|
JAX/LanguageModeling/PAXML | PAXML | README | Paxml (aka Pax) is a framework for training LLMs. It allows for advanced and configurable experimentation and parallelization. It is based on [JAX](https://github.com/google/jax) and [Praxis](https://github.com/google/praxis).
# PAXML on GPUs
Please refer to [Rosetta PAXML](https://github.com/NVIDIA/JAX-Toolbox/tree/main/rosetta/rosetta/projects/pax), NVIDIA's project that enables seamless training of LLMs, CV models and multimodal models in JAX, for information about running models and experiments on GPUs in PAXML.
|
CUDA-Optimized/FastSpeech/waveglow | waveglow | loss_function | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output, clean_audio):
# clean_audio is unused;
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(
z * z) / (2 * self.sigma * self.sigma) - log_s_total - log_det_W_total # noqa: E501
return loss / (z.size(0) * z.size(1) * z.size(2))
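# --- Hedged example (not part of the original file): exercising WaveGlowLoss with
# small random tensors shaped like WaveGlow outputs, i.e. a tuple of
# (z, list of log_s terms, list of log|det W| terms). ---
if __name__ == "__main__":
    criterion = WaveGlowLoss(sigma=1.0)
    z = torch.randn(2, 4, 1000)                           # (batch, channels, time)
    log_s_list = [torch.randn(2, 2, 1000) for _ in range(3)]
    log_det_W_list = [torch.randn(()) for _ in range(3)]  # scalar log-determinants
    loss = criterion((z, log_s_list, log_det_W_list), clean_audio=None)
    print(loss.item())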
|
TensorFlow/Segmentation/UNet_Industrial/notebooks | notebooks | Colab_UNet_Industrial_TF_TFHub_inference_demo | #!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_Industrial/notebooks/Colab_UNet_Industrial_TF_TFHub_inference_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# In[ ]:
# Copyright 2019 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
#
# # UNet Industrial Inference Demo with TensorFlow Hub
# ## Overview
#
#
# In this notebook, we will demo the process of inference with NVIDIA pre-trained UNet Industrial defect detection TensorFlow Hub modules.
#
# NVIDIA pre-trained U-Net models for defect detection are adapted from the original version of the [U-Net model](https://arxiv.org/abs/1505.04597) which is
# a convolutional auto-encoder for 2D image segmentation. U-Net was first introduced by
# Olaf Ronneberger, Philipp Fischer, and Thomas Brox in the paper:
# [U-Net: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597).
#
# ### Requirement
# 1. Before running this notebook, please set the Colab runtime environment to GPU via the menu *Runtime => Change runtime type => GPU*.
#
#
# In[1]:
get_ipython().system('nvidia-smi')
# The below code checks whether a Tensor-Core GPU is present. Tensor Cores can accelerate large matrix operations by performing mixed-precision matrix multiply and accumulate calculations in a single operation.
# In[2]:
get_ipython().run_line_magic('tensorflow_version', '1.x')
import tensorflow as tf
print(tf.__version__) # This notebook runs on TensorFlow 1.x.
from tensorflow.python.client import device_lib
def check_tensor_core_gpu_present():
local_device_protos = device_lib.list_local_devices()
for line in local_device_protos:
if "compute capability" in str(line):
compute_capability = float(line.physical_device_desc.split("compute capability: ")[-1])
if compute_capability>=7.0:
return True
print("Tensor Core GPU Present:", check_tensor_core_gpu_present())
tensor_core_gpu = check_tensor_core_gpu_present()
# 2. Next, we clone the NVIDIA Github UNet_Industrial repository and set up the workspace.
# In[3]:
get_ipython().system('git clone https://github.com/NVIDIA/DeepLearningExamples')
# In[4]:
get_ipython().run_cell_magic('bash', '', 'cd DeepLearningExamples\ngit checkout master\n')
# In[5]:
import os
WORKSPACE_DIR='/content/DeepLearningExamples/TensorFlow/Segmentation/UNet_Industrial/notebooks'
os.chdir(WORKSPACE_DIR)
print (os.getcwd())
# In[6]:
get_ipython().system('pip install tensorflow_hub==0.6.0')
# ## Data download
#
# We will first download some data for testing purposes, in particular, the [Weakly Supervised Learning for Industrial Optical Inspection (DAGM 2007)](https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html) dataset.
#
# > The competition is inspired by problems from industrial image processing. In order to satisfy their customers' needs, companies have to guarantee the quality of their products, which can often be achieved only by inspection of the finished product. Automatic visual defect detection has the potential to reduce the cost of quality assurance significantly.
# >
# > The competitors have to design a stand-alone algorithm which is able to detect miscellaneous defects on various background textures.
# >
# > The particular challenge of this contest is that the algorithm must learn, without human intervention, to discern defects automatically from a weakly labeled (i.e., labels are not exact to the pixel level) training set, the exact characteristics of which are unknown at development time. During the competition, the programs have to be trained on new data without any human guidance.
#
# **Source:** https://resources.mpi-inf.mpg.de/conference/dagm/2007/prizes.html
#
# In[ ]:
get_ipython().system(' ./download_and_preprocess_dagm2007_public.sh ./data')
# The final data directory should look like:
#
# ```
# ./data
# raw_images
# public
# Class1
# Class2
# Class3
# Class4
# Class5
# Class6
# Class1_def
# Class2_def
# Class3_def
# Class4_def
# Class5_def
# Class6_def
# private
# zip_files
# ```
# Each data directory contains training images corresponding to one of the first 6 types of defects.
# ## Load UNet TF-Hub modules from Google Drive (Optional)
#
# This step allows you to connect and load pretrained UNet TF-Hub modules from Google Drive (only if you have modules saved there - see this [notebook](https://colab.research.google.com/github/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_Industrial/notebooks/Colab_UNet_Industrial_TF_TFHub_export.ipynb) on UNet TF-Hub module creation and export to Google Drive). Execute the below cell to authorize Colab to access your Google Drive content, then copy the saved TF-Hub modules to Colab.
# In[ ]:
from google.colab import drive
drive.mount('/content/gdrive')
# In[ ]:
get_ipython().system('cp -r "/content/gdrive/My Drive/NVIDIA/Unet_modules" .')
# In[ ]:
get_ipython().system('ls Unet_modules')
# ## Inference with UNet TF-Hub modules
#
# Next, we will load one of the pretrained UNet TF-Hub modules (corresponding to one of the 10 classes of the DAGM 2007 dataset) and carry out inference.
#
# In order to load TF-Hub modules, there are several options:
#
# - Load from a local cache or directory
#
# - Load from a remote repository
# In[ ]:
import tensorflow_hub as hub
# Loading from a local cache/directory
#module = hub.Module("Unet_modules/Class_1", trainable=False)
# Loading from a remote repository. The 10 NVIDIA UNet TF-Hub modules are available at
# https://tfhub.dev/nvidia/unet/industrial/class_1/1 (similarly for class 2, 3 ...) and
# https://developer.download.nvidia.com/compute/redist/Binary_Files/unet_tfhub_modules/class_{1..10}
module = hub.Module("https://tfhub.dev/nvidia/unet/industrial/class_1/1") # or class_2, class_3 etc...
#module = hub.Module("https://developer.download.nvidia.com/compute/redist/Binary_Files/unet_tfhub_modules/class_1/1.tar.gz") # or cls_as2, class_3 etc...
# In[9]:
print(module.get_signature_names())
# In[10]:
print(module.get_input_info_dict()) # When no signature is given, considers it as 'default'
# In[11]:
print(module.get_output_info_dict())
# As seen, this module expects inputs as grayscale images of size 512x512, and produces masks of the same size.
# In[12]:
# Load a test image
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread('./data/raw_images/public/Class1_def/1.png')
plt.figure(figsize = (10,10));
plt.imshow(img, cmap='gray');
# As we can see in this figure, there exists a defective area in the top left corner. We will now start a TF session and carry out inference on the normalized test image with the loaded TF-Hub module.
# In[13]:
# Image preprocessing
img = np.expand_dims(img, axis=2)
img = np.expand_dims(img, axis=0)
img = (img-0.5)/0.5
output = module(img)
# In[14]:
print(output.shape)
# In[ ]:
import tensorflow as tf
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
pred = sess.run(output)
# In[16]:
# Print out model predicted mask
plt.figure(figsize = (10,10));
plt.imshow(np.squeeze(pred), cmap='gray');
# As expected, the TF-Hub module points out the correct defective area in this image. Please feel free to try out other defective images for Class 1 within `./data/raw_images/public/Class1_def/`, or load the other UNet modules and test data for other classes from 1 to 10.
# In[ ]:
get_ipython().system('ls ./data/raw_images/public/Class1_def/')
# # Conclusion
#
# In this notebook, we have walked through the process of loading a pretrained UNet-Industrial TF-Hub module and carrying out inference on a test image.
# ## What's next
# Now it's time to try the UNet-Industrial TF Hub modules on your own data.
# In[ ]:
|
PaddlePaddle/Classification/RN50v1.5 | RN50v1.5 | export_model | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import paddle
import program
from dali import build_dataloader
from utils.mode import Mode
from utils.save_load import init_ckpt
from utils.logger import setup_dllogger
from utils.config import parse_args, print_args
def main(args):
'''
Export saved model params to paddle inference model
'''
setup_dllogger(args.trt_export_log_path)
if args.show_config:
print_args(args)
eval_dataloader = build_dataloader(args, Mode.EVAL)
startup_prog = paddle.static.Program()
eval_prog = paddle.static.Program()
eval_fetchs, _, eval_feeds, _ = program.build(
args,
eval_prog,
startup_prog,
step_each_epoch=len(eval_dataloader),
is_train=False)
eval_prog = eval_prog.clone(for_test=True)
device = paddle.set_device('gpu')
exe = paddle.static.Executor(device)
exe.run(startup_prog)
path_to_ckpt = args.from_checkpoint
if path_to_ckpt is None:
logging.warning(
'The --from-checkpoint is not set, model weights will not be initialized.'
)
else:
init_ckpt(path_to_ckpt, eval_prog, exe)
logging.info('Checkpoint path is %s', path_to_ckpt)
save_inference_dir = args.trt_inference_dir
paddle.static.save_inference_model(
path_prefix=os.path.join(save_inference_dir, args.model_arch_name),
feed_vars=[eval_feeds['data']],
fetch_vars=[eval_fetchs['label'][0]],
executor=exe,
program=eval_prog)
logging.info('Successfully exported inference model to %s',
save_inference_dir)
if __name__ == '__main__':
paddle.enable_static()
main(parse_args(including_trt=True))
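# Hedged usage sketch (not part of the original file). The CLI flags come from
# parse_args in utils/config.py, which is not shown here; only --from-checkpoint
# is confirmed by the warning above, the output-directory flag is an assumption:
#   python export_model.py \
#       --from-checkpoint ./output/ResNet50/89 \
#       --trt-inference-dir ./inference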
|
PyTorch/SpeechRecognition/Jasper | Jasper | inference | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import os
import random
import time
from heapq import nlargest
from itertools import chain, repeat
from pathlib import Path
from tqdm import tqdm
import dllogger
import torch
import numpy as np
import torch.distributed as distrib
import torch.nn.functional as F
from apex import amp
from apex.parallel import DistributedDataParallel
from dllogger import JSONStreamBackend, StdOutBackend, Verbosity
from jasper import config
from common import helpers
from common.dali.data_loader import DaliDataLoader
from common.dataset import (AudioDataset, FilelistDataset, get_data_loader,
SingleAudioDataset)
from common.features import BaseFeatures, FilterbankFeatures
from common.helpers import print_once, process_evaluation_epoch
from jasper.model import GreedyCTCDecoder, Jasper
from common.tb_dllogger import stdout_metric_format, unique_log_fpath
def get_parser():
parser = argparse.ArgumentParser(description='Jasper')
parser.add_argument('--batch_size', default=16, type=int,
help='Data batch size')
parser.add_argument('--steps', default=0, type=int,
help='Eval this many steps for every worker')
parser.add_argument('--warmup_steps', default=0, type=int,
help='Burn-in period before measuring latencies')
parser.add_argument('--model_config', type=str, required=True,
help='Relative model config path given dataset folder')
parser.add_argument('--dataset_dir', type=str,
help='Absolute path to dataset folder')
parser.add_argument('--val_manifests', type=str, nargs='+',
help='Relative path to evaluation dataset manifest files')
parser.add_argument('--ckpt', default=None, type=str,
help='Path to model checkpoint')
parser.add_argument('--pad_leading', type=int, default=16,
help='Pads every batch with leading zeros '
'to counteract conv shifts of the field of view')
parser.add_argument('--amp', '--fp16', action='store_true',
help='Use FP16 precision')
parser.add_argument('--cudnn_benchmark', action='store_true',
help='Enable cudnn benchmark')
parser.add_argument('--cpu', action='store_true',
help='Run inference on CPU')
parser.add_argument("--seed", default=None, type=int, help='Random seed')
parser.add_argument('--local_rank', default=os.getenv('LOCAL_RANK', 0),
type=int, help='GPU id used for distributed training')
io = parser.add_argument_group('feature and checkpointing setup')
io.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'],
default='gpu', help='Use DALI pipeline for fast data processing')
io.add_argument('--save_predictions', type=str, default=None,
help='Save predictions in text form at this location')
io.add_argument('--save_logits', default=None, type=str,
help='Save output logits under specified path')
io.add_argument('--transcribe_wav', type=str,
help='Path to a single .wav file (16KHz)')
io.add_argument('--transcribe_filelist', type=str,
help='Path to a filelist with one .wav path per line')
io.add_argument('-o', '--output_dir', default='results/',
help='Output folder to save audio (file per phrase)')
io.add_argument('--log_file', type=str, default=None,
help='Path to a DLLogger log file')
io.add_argument('--ema', action='store_true',
help='Load averaged model weights')
io.add_argument('--torchscript', action='store_true',
help='Evaluate with a TorchScripted model')
io.add_argument('--torchscript_export', action='store_true',
help='Export the model with torch.jit to the output_dir')
io.add_argument('--override_config', type=str, action='append',
help='Overrides a value from a config .yaml.'
' Syntax: `--override_config nested.config.key=val`.')
return parser
def durs_to_percentiles(durations, ratios):
durations = np.asarray(durations) * 1000 # in ms
latency = durations
latency = latency[5:]
mean_latency = np.mean(latency)
latency_worst = nlargest(math.ceil((1 - min(ratios)) * len(latency)), latency)
latency_ranges = get_percentile(ratios, latency_worst, len(latency))
latency_ranges[0.5] = mean_latency
return latency_ranges
def get_percentile(ratios, arr, nsamples):
res = {}
for a in ratios:
idx = max(int(nsamples * (1 - a)), 0)
res[a] = arr[idx]
return res
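# Hedged example (not part of the original script): durs_to_percentiles drops the
# first five measurements as warm-up, converts seconds to milliseconds, and returns
# a dict keyed by ratio plus the mean under 0.5:
#
#     durs = [0.032, 0.030, 0.031] * 20     # 60 synthetic per-iteration times [s]
#     lat = durs_to_percentiles(durs, ratios=[0.9, 0.95, 0.99])
#     # lat -> {0.99: ..., 0.95: ..., 0.9: ..., 0.5: <mean latency in ms>}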
def torchscript_export(data_loader, audio_processor, model, greedy_decoder,
output_dir, use_amp, use_conv_masks, model_config, device,
save):
audio_processor.to(device)
for batch in data_loader:
batch = [t.to(device, non_blocking=True) for t in batch]
audio, audio_len, _, _ = batch
feats, feat_lens = audio_processor(audio, audio_len)
break
print("\nExporting featurizer...")
print("\nNOTE: Dithering causes warnings about non-determinism.\n")
ts_feat = torch.jit.trace(audio_processor, (audio, audio_len))
print("\nExporting acoustic model...")
model(feats, feat_lens)
ts_acoustic = torch.jit.trace(model, (feats, feat_lens))
print("\nExporting decoder...")
log_probs = model(feats, feat_lens)
ts_decoder = torch.jit.script(greedy_decoder, log_probs)
print("\nJIT export complete.")
if save:
precision = "fp16" if use_amp else "fp32"
module_name = f'{os.path.basename(model_config)}_{precision}'
ts_feat.save(os.path.join(output_dir, module_name + "_feat.pt"))
ts_acoustic.save(os.path.join(output_dir, module_name + "_acoustic.pt"))
ts_decoder.save(os.path.join(output_dir, module_name + "_decoder.pt"))
return ts_feat, ts_acoustic, ts_decoder
def main():
parser = get_parser()
args = parser.parse_args()
log_fpath = args.log_file or str(Path(args.output_dir, 'nvlog_infer.json'))
dllogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, metric_format=stdout_metric_format)
])
[dllogger.log("PARAMETER", {k: v}) for k, v in vars(args).items()]
for step in ['DNN', 'data+DNN', 'data']:
for c in [0.99, 0.95, 0.9, 0.5]:
cs = 'avg' if c == 0.5 else f'{int(100*c)}%'
dllogger.metadata(f'{step.lower()}_latency_{c}',
{'name': f'{step} latency {cs}',
'format': ':>7.2f', 'unit': 'ms'})
dllogger.metadata(
'eval_wer', {'name': 'WER', 'format': ':>3.2f', 'unit': '%'})
if args.cpu:
device = torch.device('cpu')
else:
assert torch.cuda.is_available()
device = torch.device('cuda')
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if args.seed is not None:
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
# set up distributed training
multi_gpu = not args.cpu and int(os.environ.get('WORLD_SIZE', 1)) > 1
if multi_gpu:
torch.cuda.set_device(args.local_rank)
distrib.init_process_group(backend='nccl', init_method='env://')
print_once(f'Inference with {distrib.get_world_size()} GPUs')
cfg = config.load(args.model_config)
config.apply_config_overrides(cfg, args)
symbols = helpers.add_ctc_blank(cfg['labels'])
use_dali = args.dali_device in ('cpu', 'gpu')
dataset_kw, features_kw = config.input(cfg, 'val')
measure_perf = args.steps > 0
# dataset
if args.transcribe_wav or args.transcribe_filelist:
if use_dali:
print("DALI supported only with input .json files; disabling")
use_dali = False
assert not (args.transcribe_wav and args.transcribe_filelist)
if args.transcribe_wav:
dataset = SingleAudioDataset(args.transcribe_wav)
else:
dataset = FilelistDataset(args.transcribe_filelist)
data_loader = get_data_loader(dataset,
batch_size=1,
multi_gpu=multi_gpu,
shuffle=False,
num_workers=0,
drop_last=(True if measure_perf else False))
_, features_kw = config.input(cfg, 'val')
assert not features_kw['pad_to_max_duration']
feat_proc = FilterbankFeatures(**features_kw)
elif use_dali:
# pad_to_max_duration is not supported by DALI - have simple padders
if features_kw['pad_to_max_duration']:
feat_proc = BaseFeatures(
pad_align=features_kw['pad_align'],
pad_to_max_duration=True,
max_duration=features_kw['max_duration'],
sample_rate=features_kw['sample_rate'],
window_size=features_kw['window_size'],
window_stride=features_kw['window_stride'])
features_kw['pad_to_max_duration'] = False
else:
feat_proc = None
data_loader = DaliDataLoader(
gpu_id=args.local_rank or 0,
dataset_path=args.dataset_dir,
config_data=dataset_kw,
config_features=features_kw,
json_names=args.val_manifests,
batch_size=args.batch_size,
pipeline_type=("train" if measure_perf else "val"), # no drop_last
device_type=args.dali_device,
symbols=symbols)
else:
dataset = AudioDataset(args.dataset_dir,
args.val_manifests,
symbols,
**dataset_kw)
data_loader = get_data_loader(dataset,
args.batch_size,
multi_gpu=multi_gpu,
shuffle=False,
num_workers=4,
drop_last=False)
feat_proc = FilterbankFeatures(**features_kw)
model = Jasper(encoder_kw=config.encoder(cfg),
decoder_kw=config.decoder(cfg, n_classes=len(symbols)))
if args.ckpt is not None:
print(f'Loading the model from {args.ckpt} ...')
checkpoint = torch.load(args.ckpt, map_location="cpu")
key = 'ema_state_dict' if args.ema else 'state_dict'
state_dict = helpers.convert_v1_state_dict(checkpoint[key])
model.load_state_dict(state_dict, strict=True)
model.to(device)
model.eval()
if feat_proc is not None:
feat_proc.to(device)
feat_proc.eval()
if args.amp:
model = model.half()
if args.torchscript:
greedy_decoder = GreedyCTCDecoder()
feat_proc, model, greedy_decoder = torchscript_export(
data_loader, feat_proc, model, greedy_decoder, args.output_dir,
use_amp=args.amp, use_conv_masks=True, model_config=args.model_config,
device=device, save=args.torchscript_export)
if multi_gpu:
model = DistributedDataParallel(model)
agg = {'txts': [], 'preds': [], 'logits': []}
dur = {'data': [], 'dnn': [], 'data+dnn': []}
looped_loader = chain.from_iterable(repeat(data_loader))
greedy_decoder = GreedyCTCDecoder()
sync = lambda: torch.cuda.synchronize() if device.type == 'cuda' else None
steps = args.steps + args.warmup_steps or len(data_loader)
with torch.no_grad():
for it, batch in enumerate(tqdm(looped_loader, initial=1, total=steps)):
if use_dali:
feats, feat_lens, txt, txt_lens = batch
if feat_proc is not None:
feats, feat_lens = feat_proc(feats, feat_lens)
else:
batch = [t.to(device, non_blocking=True) for t in batch]
audio, audio_lens, txt, txt_lens = batch
feats, feat_lens = feat_proc(audio, audio_lens)
sync()
t1 = time.time()
if args.amp:
feats = feats.half()
feats = F.pad(feats, (args.pad_leading, 0))
feat_lens += args.pad_leading
if model.encoder.use_conv_masks:
log_probs, log_prob_lens = model(feats, feat_lens)
else:
log_probs = model(feats, feat_lens)
preds = greedy_decoder(log_probs)
sync()
t2 = time.time()
# burn-in period; wait for a new loader due to num_workers
if it >= 1 and (args.steps == 0 or it >= args.warmup_steps):
dur['data'].append(t1 - t0)
dur['dnn'].append(t2 - t1)
dur['data+dnn'].append(t2 - t0)
if txt is not None:
agg['txts'] += helpers.gather_transcripts([txt], [txt_lens],
symbols)
agg['preds'] += helpers.gather_predictions([preds], symbols)
agg['logits'].append(log_probs)
if it + 1 == steps:
break
sync()
t0 = time.time()
# communicate the results
if args.transcribe_wav:
for idx, p in enumerate(agg['preds']):
print_once(f'Prediction {idx+1: >3}: {p}')
elif args.transcribe_filelist:
pass
elif not multi_gpu or distrib.get_rank() == 0:
wer, _ = process_evaluation_epoch(agg)
dllogger.log(step=(), data={'eval_wer': 100 * wer})
if args.save_predictions:
with open(args.save_predictions, 'w') as f:
f.write('\n'.join(agg['preds']))
if args.save_logits:
logits = torch.cat(agg['logits'], dim=0).cpu()
torch.save(logits, args.save_logits)
# report timings
if len(dur['data']) >= 20:
ratios = [0.9, 0.95, 0.99]
for stage in dur:
lat = durs_to_percentiles(dur[stage], ratios)
for k in [0.99, 0.95, 0.9, 0.5]:
kk = str(k).replace('.', '_')
dllogger.log(step=(), data={f'{stage.lower()}_latency_{kk}': lat[k]})
else:
print_once('Not enough samples to measure latencies.')
if __name__ == "__main__":
main()
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/evaluator | evaluator | xgbevaluator | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: evaluators.evaluator.XGBMetricEvaluator
config:
output_selector: 0
save_predictions: false
metrics:
- MSE
- MAE
- RMSE
- SMAPE
use_weights: False
|
TensorFlow/Segmentation/UNet_Medical/utils | utils | parse_results | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import argparse
def process_performance_stats(timestamps, batch_size, mode):
""" Get confidence intervals
:param timestamps: Collection of timestamps
:param batch_size: Number of samples per batch
:param mode: Estimator's execution mode
:return: Stats
"""
timestamps_ms = 1000 * timestamps
throughput_imgps = (1000.0 * batch_size / timestamps_ms).mean()
stats = {f"throughput_{mode}": throughput_imgps,
f"latency_{mode}_mean": timestamps_ms.mean()}
for level in [90, 95, 99]:
stats.update({f"latency_{mode}_{level}": np.percentile(timestamps_ms, level)})
return stats
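# Hedged example (not part of the original file): timestamps is expected to be a
# NumPy array of per-batch times in seconds so the arithmetic above broadcasts:
#
#     ts = np.random.uniform(0.05, 0.06, size=200)      # 200 synthetic batch times
#     print(process_performance_stats(ts, batch_size=8, mode="train"))
#     # -> throughput_train, latency_train_mean, latency_train_{90,95,99}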
def parse_convergence_results(path, environment):
dice_scores = []
ce_scores = []
logfiles = [f for f in os.listdir(path) if "log" in f and environment in f]
if not logfiles:
raise FileNotFoundError("No logfile found at {}".format(path))
for logfile in logfiles:
with open(os.path.join(path, logfile), "r") as f:
content = f.readlines()[-1]
if "eval_dice_score" not in content:
print("Evaluation score not found. The file", logfile, "might be corrupted.")
continue
dice_scores.append(float([val for val in content.split(" ")
if "eval_dice_score" in val][0].split()[-1]))
ce_scores.append(float([val for val in content.split(" ")
if "eval_ce_loss" in val][0].split()[-1]))
if dice_scores:
print("Evaluation dice score:", sum(dice_scores) / len(dice_scores))
print("Evaluation cross-entropy loss:", sum(ce_scores) / len(ce_scores))
else:
print("All logfiles were corrupted, no loss was obtained.")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="UNet-medical-utils")
parser.add_argument('--exec_mode',
choices=['convergence', 'benchmark'],
type=str,
help="""Which execution mode to run the model into""")
parser.add_argument('--model_dir',
type=str,
required=True)
parser.add_argument('--env',
choices=['FP32_1GPU', 'FP32_8GPU', 'TF-AMP_1GPU', 'TF-AMP_8GPU'],
type=str,
required=True)
args = parser.parse_args()
if args.exec_mode == 'convergence':
parse_convergence_results(path=args.model_dir, environment=args.env)
elif args.exec_mode == 'benchmark':
pass
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/scripts | scripts | get_data | #!/bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DATAPATH='/data'
declare -A URLS=( ['electricity']='https://archive.ics.uci.edu/ml/machine-learning-databases/00321/LD2011_2014.txt.zip'
['volatility']='https://realized.oxford-man.ox.ac.uk/images/oxfordmanrealizedvolatilityindices.zip'
['traffic']='https://archive.ics.uci.edu/ml/machine-learning-databases/00204/PEMS-SF.zip'
)
mkdir -p ${DATAPATH}/raw
mkdir -p ${DATAPATH}/processed
for DS in electricity volatility traffic
do
DS_PATH=${DATAPATH}/raw/${DS}
ZIP_FNAME=${DS_PATH}.zip
if [ ! -d ${DS_PATH} ]
then
wget "${URLS[${DS}]}" -O ${ZIP_FNAME}
unzip ${ZIP_FNAME} -d ${DS_PATH}
fi
python -c "from data_utils import standarize_${DS} as standarize; standarize(\"${DS_PATH}\")"
python -c "from data_utils import preprocess; \
from configuration import ${DS^}Config as Config; \
preprocess(\"${DS_PATH}/standarized.csv\", \"${DATAPATH}/processed/${DS}_bin\", Config())"
done
FAVORITA_ZIP="favorita-grocery-sales-forecasting.zip"
DS_PATH=${DATAPATH}/raw/favorita
if [ ! -f ${DS_PATH}/${FAVORITA_ZIP} ]
then
    echo ${DS_PATH}/${FAVORITA_ZIP} not found. Please download the favorita dataset from https://www.kaggle.com/c/favorita-grocery-sales-forecasting/data
exit 1
fi
unzip ${DS_PATH}/${FAVORITA_ZIP} -d ${DS_PATH}
for F in `ls ${DATAPATH}/raw/favorita`
do
7z e ${DS_PATH}/${F} -o${DS_PATH}
done
python -c "from data_utils import standarize_favorita as standarize; standarize(\"${DS_PATH}\")"
python -c "from data_utils import preprocess; \
from configuration import FavoritaConfig as Config; \
preprocess(\"${DS_PATH}/standarized.csv\", \"${DATAPATH}/processed/favorita_bin\", Config())"
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/runtime | runtime | callbacks | import logging
import time
from collections import defaultdict
import numpy as np
import tensorflow as tf
from mrcnn_tf2.utils.keras import KerasCallback
CONFIDENCE_INTERVAL_Z = {
80.0: 1.282,
85.0: 1.440,
90.0: 1.645,
95.0: 1.960,
99.0: 2.576,
99.5: 2.807,
99.9: 3.291,
}
class DLLoggerMetricsCallback(KerasCallback):
"""
Keras callback that saves metrics using DLLogger.
"""
def __init__(self, dllogger, log_every=10, log_learning_rate=False):
"""
Args:
dllogger (DLLogger): DLLogger instance.
log_every (int): Logging interval.
log_learning_rate (bool): When set to true adds learning rate to metrics.
Cannot be used with AMP enabled as the used hack fails with AMP.
"""
super().__init__()
self._dllogger = dllogger
self._log_every = log_every
self._log_learning_rate = log_learning_rate
if not isinstance(log_every, dict):
self._log_every = defaultdict(lambda: log_every)
self._dllogger.metadata('loss', {'unit': None})
self._dllogger.metadata('AP', {'unit': None})
self._dllogger.metadata('mask_AP', {'unit': None})
logging.getLogger('hooks').info('Created metrics logging hook')
def on_any_batch_end(self, mode, epoch, batch, logs):
if (batch + 1) % self._log_every[mode] != 0:
return
step = (None if epoch is None else epoch + 1, batch + 1)
self._log_metrics(mode, logs, step=step)
def on_any_epoch_end(self, mode, epoch, logs):
step = (None if epoch is None else epoch + 1, )
self._log_metrics(mode, logs, step=step)
def on_any_end(self, mode, logs):
self._log_metrics(mode, logs)
def _log_metrics(self, mode, logs, step=tuple()):
logs = logs or {}
# remove outputs that are not in fact a metric
logs.pop('outputs', None)
if mode == 'train' and self._log_learning_rate:
logs['learning_rate'] = float(self.model.optimizer._decayed_lr(tf.float32))
# no point in logging with empty data
if not logs:
return
self._dllogger.log(step=step, data=logs)
class DLLoggerPerfCallback(KerasCallback):
"""
Keras callback that measures performance and logs it using DLLogger.
"""
def __init__(self, dllogger, batch_sizes, warmup_steps=0, log_every=None):
super().__init__()
self._dllogger = dllogger
self._batch_sizes = batch_sizes
self._warmup_steps = warmup_steps
self._log_every = log_every
if not isinstance(batch_sizes, dict):
self._batch_sizes = defaultdict(lambda: batch_sizes)
if not isinstance(warmup_steps, dict):
self._warmup_steps = defaultdict(lambda: warmup_steps)
if not isinstance(log_every, dict):
self._log_every = defaultdict(lambda: log_every)
self._deltas = {}
self._batch_timestamps = {}
self._start_timestamps = {}
for mode in ['train', 'test', 'predict']:
self._dllogger.metadata(f'{mode}_throughput', {'unit': 'images/s'})
self._dllogger.metadata(f'{mode}_latency', {'unit': 's'})
self._dllogger.metadata(f'{mode}_latency_90', {'unit': 's'})
self._dllogger.metadata(f'{mode}_latency_95', {'unit': 's'})
self._dllogger.metadata(f'{mode}_latency_99', {'unit': 's'})
self._dllogger.metadata(f'{mode}_time', {'unit': 's'})
self._logger = logging.getLogger('hooks')
self._logger.info('Created perf logging hooks')
def on_any_begin(self, mode, logs):
self._deltas[mode] = []
self._start_timestamps[mode] = time.time()
def on_any_batch_begin(self, mode, epoch, batch, logs):
self._batch_timestamps[mode] = time.time()
def on_any_batch_end(self, mode, epoch, batch, logs):
self._deltas[mode].append(time.time() - self._batch_timestamps[mode])
if self._log_every[mode] and (batch + 1) % self._log_every[mode] != 0:
return
step = (None if epoch is None else epoch + 1, batch + 1)
self._log_perf(self._deltas[mode][-self._log_every[mode]:], mode, step=step)
def on_any_end(self, mode, logs):
if len(self._deltas[mode]) > self._warmup_steps[mode]:
self._log_perf(self._deltas[mode][self._warmup_steps[mode]:], mode)
else:
self._logger.warning(
                f'Number of all {mode} steps was smaller than the number of warm-up steps, '
f'no stats were collected.'
)
def _log_perf(self, deltas, mode, step=tuple()):
deltas = np.array(deltas)
self._dllogger.log(
step=step,
data={
f'{mode}_throughput': self._calculate_throughput(deltas, self._batch_sizes[mode]),
f'{mode}_latency': self._calculate_latency(deltas),
f'{mode}_latency_90': self._calculate_latency_confidence(deltas, 90.0),
f'{mode}_latency_95': self._calculate_latency_confidence(deltas, 95.0),
f'{mode}_latency_99': self._calculate_latency_confidence(deltas, 99.0),
f'{mode}_time': self._calculate_total_time(self._start_timestamps[mode], time.time())
}
)
@staticmethod
def _calculate_throughput(deltas, batch_size):
return batch_size / deltas.mean()
@staticmethod
def _calculate_latency(deltas):
return deltas.mean()
@staticmethod
def _calculate_latency_confidence(deltas, confidence_interval):
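        # Upper bound of a normal-approximation confidence interval on the mean
        # latency: mean + z * std / sqrt(n), with z looked up in CONFIDENCE_INTERVAL_Z.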
mean = deltas.mean()
std = deltas.std()
n = len(deltas)
z = CONFIDENCE_INTERVAL_Z[confidence_interval]
return mean + (z * std / np.sqrt(n))
@staticmethod
def _calculate_total_time(start_time, end_time):
return end_time - start_time
class PretrainedWeightsLoadingCallback(KerasCallback):
"""
Loads pretrained weights from given checkpoint after first batch.
"""
def __init__(self, checkpoint_path, mapping=None):
"""
Args:
checkpoint_path: Path to the checkpoint, as accepted by `tf.train.load_checkpoint()`
mapping: Callable that takes name of a variable and returns name of a corresponding
entry in the checkpoint.
"""
super().__init__()
self._checkpoint_path = checkpoint_path
self._mapping = mapping or (lambda x: x)
self._loaded = False
self._logger = logging.getLogger('hooks')
self._logger.info(f'Created pretrained backbone weights loading hook that loads from {checkpoint_path}')
def on_train_batch_end(self, batch, logs=None):
super().on_train_batch_end(batch, logs)
if not self._loaded:
self.load_weights()
self._loaded = True
def load_weights(self):
reader = tf.train.load_checkpoint(self._checkpoint_path)
variable_mapping = {
self._mapping(var.name): var
for var in self.model.variables
if reader.has_tensor(self._mapping(var.name))
}
for cp_name, var in variable_mapping.items():
var.assign(reader.get_tensor(cp_name))
self._logger.debug(f'Assigned "{cp_name}" from checkpoint to "{var.name}"')
self._logger.info(f'Loaded {len(variable_mapping)} pretrained backbone variables')
|
PyTorch/SpeechSynthesis/FastPitch/fastpitch | fastpitch | alignment | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numba import jit, prange
@jit(nopython=True)
def mas(log_attn_map, width=1):
# assumes mel x text
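    # Monotonic alignment search via dynamic programming:
    # log_p[i, j] holds the best cumulative score of a monotonic path ending at
    # mel frame i and text token j, where the text index advances by at most
    # `width` tokens per frame; prev_ind stores the argmax used for backtracking.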
opt = np.zeros_like(log_attn_map)
log_attn_map = log_attn_map.copy()
log_attn_map[0, 1:] = -np.inf
log_p = np.zeros_like(log_attn_map)
log_p[0, :] = log_attn_map[0, :]
prev_ind = np.zeros_like(log_attn_map, dtype=np.int64)
for i in range(1, log_attn_map.shape[0]):
for j in range(log_attn_map.shape[1]): # for each text dim
prev_j = np.arange(max(0, j-width), j+1)
prev_log = np.array([log_p[i-1, prev_idx] for prev_idx in prev_j])
ind = np.argmax(prev_log)
log_p[i, j] = log_attn_map[i, j] + prev_log[ind]
prev_ind[i, j] = prev_j[ind]
# now backtrack
curr_text_idx = log_attn_map.shape[1]-1
for i in range(log_attn_map.shape[0]-1, -1, -1):
opt[i, curr_text_idx] = 1
curr_text_idx = prev_ind[i, curr_text_idx]
opt[0, curr_text_idx] = 1
return opt
@jit(nopython=True)
def mas_width1(log_attn_map):
"""mas with hardcoded width=1"""
# assumes mel x text
neg_inf = log_attn_map.dtype.type(-np.inf)
log_p = log_attn_map.copy()
log_p[0, 1:] = neg_inf
for i in range(1, log_p.shape[0]):
prev_log1 = neg_inf
for j in range(log_p.shape[1]):
prev_log2 = log_p[i-1, j]
log_p[i, j] += max(prev_log1, prev_log2)
prev_log1 = prev_log2
# now backtrack
opt = np.zeros_like(log_p)
one = opt.dtype.type(1)
j = log_p.shape[1]-1
for i in range(log_p.shape[0]-1, 0, -1):
opt[i, j] = one
if log_p[i-1, j-1] >= log_p[i-1, j]:
j -= 1
if j == 0:
opt[1:i, j] = one
break
opt[0, j] = one
return opt
@jit(nopython=True, parallel=True)
def b_mas(b_log_attn_map, in_lens, out_lens, width=1):
assert width == 1
attn_out = np.zeros_like(b_log_attn_map)
for b in prange(b_log_attn_map.shape[0]):
out = mas_width1(b_log_attn_map[b, 0, :out_lens[b], :in_lens[b]])
attn_out[b, 0, :out_lens[b], :in_lens[b]] = out
return attn_out
|
PyTorch/Translation/Transformer/fairseq | fairseq | __init__ | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from .multiprocessing_pdb import pdb
__all__ = ['pdb']
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/models/layers | layers | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
TensorFlow2/Detection/Efficientdet/model | model | nms_np | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Anchor definition."""
import numpy as np
# The minimum score to consider a logit for identifying detections.
MIN_CLASS_SCORE = -5.0
# The score for a dummy detection
_DUMMY_DETECTION_SCORE = -1e5
# The maximum number of (anchor,class) pairs to keep for non-max suppression.
MAX_DETECTION_POINTS = 5000
def diou_nms(dets, iou_thresh=None):
"""DIOU non-maximum suppression.
diou = iou - square of euclidian distance of box centers
/ square of diagonal of smallest enclosing bounding box
Reference: https://arxiv.org/pdf/1911.08287.pdf
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
iou_thresh: IOU threshold,
Returns:
numpy.array: Retained boxes.
"""
iou_thresh = iou_thresh or 0.5
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
center_x = (x1 + x2) / 2
center_y = (y1 + y2) / 2
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
iou = intersection / (areas[i] + areas[order[1:]] - intersection)
smallest_enclosing_box_x1 = np.minimum(x1[i], x1[order[1:]])
smallest_enclosing_box_x2 = np.maximum(x2[i], x2[order[1:]])
smallest_enclosing_box_y1 = np.minimum(y1[i], y1[order[1:]])
smallest_enclosing_box_y2 = np.maximum(y2[i], y2[order[1:]])
square_of_the_diagonal = (
(smallest_enclosing_box_x2 - smallest_enclosing_box_x1)**2 +
(smallest_enclosing_box_y2 - smallest_enclosing_box_y1)**2)
square_of_center_distance = ((center_x[i] - center_x[order[1:]])**2 +
(center_y[i] - center_y[order[1:]])**2)
# Add 1e-10 for numerical stability.
diou = iou - square_of_center_distance / (square_of_the_diagonal + 1e-10)
inds = np.where(diou <= iou_thresh)[0]
order = order[inds + 1]
return dets[keep]
def hard_nms(dets, iou_thresh=None):
"""The basic hard non-maximum suppression.
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
iou_thresh: IOU threshold,
Returns:
numpy.array: Retained boxes.
"""
iou_thresh = iou_thresh or 0.5
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
overlap = intersection / (areas[i] + areas[order[1:]] - intersection)
inds = np.where(overlap <= iou_thresh)[0]
order = order[inds + 1]
return dets[keep]
def soft_nms(dets, nms_configs):
"""Soft non-maximum suppression.
[1] Soft-NMS -- Improving Object Detection With One Line of Code.
https://arxiv.org/abs/1704.04503
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
nms_configs: a dict config that may contain the following members
* method: one of {`linear`, `gaussian`, 'hard'}. Use `gaussian` if None.
* iou_thresh (float): IOU threshold, only for `linear`, `hard`.
* sigma: Gaussian parameter, only for method 'gaussian'.
* score_thresh (float): Box score threshold for final boxes.
Returns:
numpy.array: Retained boxes.
"""
method = nms_configs['method']
# Default sigma and iou_thresh are from the original soft-nms paper.
sigma = nms_configs['sigma'] or 0.5
iou_thresh = nms_configs['iou_thresh'] or 0.3
score_thresh = nms_configs['score_thresh'] or 0.001
x1 = np.float32(dets[:, 0])
y1 = np.float32(dets[:, 1])
x2 = np.float32(dets[:, 2])
y2 = np.float32(dets[:, 3])
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# expand dets with areas, and the second dimension is
# x1, y1, x2, y2, score, area
dets = np.concatenate((dets, areas[:, None]), axis=1)
retained_box = []
while dets.size > 0:
max_idx = np.argmax(dets[:, 4], axis=0)
dets[[0, max_idx], :] = dets[[max_idx, 0], :]
retained_box.append(dets[0, :-1])
xx1 = np.maximum(dets[0, 0], dets[1:, 0])
yy1 = np.maximum(dets[0, 1], dets[1:, 1])
xx2 = np.minimum(dets[0, 2], dets[1:, 2])
yy2 = np.minimum(dets[0, 3], dets[1:, 3])
w = np.maximum(xx2 - xx1 + 1, 0.0)
h = np.maximum(yy2 - yy1 + 1, 0.0)
inter = w * h
iou = inter / (dets[0, 5] + dets[1:, 5] - inter)
if method == 'linear':
weight = np.ones_like(iou)
weight[iou > iou_thresh] -= iou[iou > iou_thresh]
elif method == 'gaussian':
weight = np.exp(-(iou * iou) / sigma)
else: # traditional nms
weight = np.ones_like(iou)
weight[iou > iou_thresh] = 0
dets[1:, 4] *= weight
retained_idx = np.where(dets[1:, 4] >= score_thresh)[0]
dets = dets[retained_idx + 1, :]
return np.vstack(retained_box)
def nms(dets, nms_configs):
"""Non-maximum suppression.
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
nms_configs: a dict config that may contain parameters.
Returns:
numpy.array: Retained boxes.
"""
nms_configs = nms_configs or {}
method = nms_configs['method']
if method == 'hard' or not method:
return hard_nms(dets, nms_configs['iou_thresh'])
if method == 'diou':
return diou_nms(dets, nms_configs['iou_thresh'])
if method in ('linear', 'gaussian'):
return soft_nms(dets, nms_configs)
raise ValueError('Unknown NMS method: {}'.format(method))
def per_class_nms(boxes, scores, classes, image_id, image_scale, num_classes,
max_boxes_to_draw, nms_configs):
"""Perform per class nms."""
boxes = boxes[:, [1, 0, 3, 2]]
detections = []
for c in range(num_classes):
indices = np.where(classes == c)[0]
if indices.shape[0] == 0:
continue
boxes_cls = boxes[indices, :]
scores_cls = scores[indices]
# Select top-scoring boxes in each class and apply non-maximum suppression
# (nms) for boxes in the same class. The selected boxes from each class are
# then concatenated for the final detection outputs.
all_detections_cls = np.column_stack((boxes_cls, scores_cls))
top_detections_cls = nms(all_detections_cls, nms_configs)
top_detections_cls = np.column_stack(
(np.repeat(image_id, len(top_detections_cls)),
top_detections_cls,
np.repeat(c + 1, len(top_detections_cls)))
)
detections.append(top_detections_cls)
def _generate_dummy_detections(number):
detections_dummy = np.zeros((number, 7), dtype=np.float32)
detections_dummy[:, 0] = image_id[0]
detections_dummy[:, 5] = _DUMMY_DETECTION_SCORE
return detections_dummy
if detections:
detections = np.vstack(detections)
# take final 100 detections
indices = np.argsort(-detections[:, -2])
detections = np.array(
detections[indices[0:max_boxes_to_draw]], dtype=np.float32)
# Add dummy detections to fill up to 100 detections
n = max(max_boxes_to_draw - len(detections), 0)
detections_dummy = _generate_dummy_detections(n)
detections = np.vstack([detections, detections_dummy])
else:
detections = _generate_dummy_detections(max_boxes_to_draw)
detections[:, 1:5] *= image_scale
return detections |
TensorFlow/Detection/SSD/models/research/object_detection/inference | inference | detection_inference | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for detection inference."""
from __future__ import division
import tensorflow as tf
from object_detection.core import standard_fields
def build_input(tfrecord_paths):
"""Builds the graph's input.
Args:
tfrecord_paths: List of paths to the input TFRecords
Returns:
serialized_example_tensor: The next serialized example. String scalar Tensor
image_tensor: The decoded image of the example. Uint8 tensor,
shape=[1, None, None,3]
"""
filename_queue = tf.train.string_input_producer(
tfrecord_paths, shuffle=False, num_epochs=1)
tf_record_reader = tf.TFRecordReader()
_, serialized_example_tensor = tf_record_reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example_tensor,
features={
standard_fields.TfExampleFields.image_encoded:
tf.FixedLenFeature([], tf.string),
})
encoded_image = features[standard_fields.TfExampleFields.image_encoded]
image_tensor = tf.image.decode_image(encoded_image, channels=3)
image_tensor.set_shape([None, None, 3])
image_tensor = tf.expand_dims(image_tensor, 0)
return serialized_example_tensor, image_tensor
def build_inference_graph(image_tensor, inference_graph_path):
"""Loads the inference graph and connects it to the input image.
Args:
image_tensor: The input image. uint8 tensor, shape=[1, None, None, 3]
inference_graph_path: Path to the inference graph with embedded weights
Returns:
detected_boxes_tensor: Detected boxes. Float tensor,
shape=[num_detections, 4]
detected_scores_tensor: Detected scores. Float tensor,
shape=[num_detections]
detected_labels_tensor: Detected labels. Int64 tensor,
shape=[num_detections]
"""
with tf.gfile.Open(inference_graph_path, 'rb') as graph_def_file:
graph_content = graph_def_file.read()
graph_def = tf.GraphDef()
graph_def.MergeFromString(graph_content)
tf.import_graph_def(
graph_def, name='', input_map={'image_tensor': image_tensor})
g = tf.get_default_graph()
num_detections_tensor = tf.squeeze(
g.get_tensor_by_name('num_detections:0'), 0)
num_detections_tensor = tf.cast(num_detections_tensor, tf.int32)
detected_boxes_tensor = tf.squeeze(
g.get_tensor_by_name('detection_boxes:0'), 0)
detected_boxes_tensor = detected_boxes_tensor[:num_detections_tensor]
detected_scores_tensor = tf.squeeze(
g.get_tensor_by_name('detection_scores:0'), 0)
detected_scores_tensor = detected_scores_tensor[:num_detections_tensor]
detected_labels_tensor = tf.squeeze(
g.get_tensor_by_name('detection_classes:0'), 0)
detected_labels_tensor = tf.cast(detected_labels_tensor, tf.int64)
detected_labels_tensor = detected_labels_tensor[:num_detections_tensor]
return detected_boxes_tensor, detected_scores_tensor, detected_labels_tensor
def infer_detections_and_add_to_example(
serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor, discard_image_pixels):
"""Runs the supplied tensors and adds the inferred detections to the example.
Args:
serialized_example_tensor: Serialized TF example. Scalar string tensor
detected_boxes_tensor: Detected boxes. Float tensor,
shape=[num_detections, 4]
detected_scores_tensor: Detected scores. Float tensor,
shape=[num_detections]
detected_labels_tensor: Detected labels. Int64 tensor,
shape=[num_detections]
discard_image_pixels: If true, discards the image from the result
Returns:
The de-serialized TF example augmented with the inferred detections.
"""
tf_example = tf.train.Example()
(serialized_example, detected_boxes, detected_scores,
detected_classes) = tf.get_default_session().run([
serialized_example_tensor, detected_boxes_tensor, detected_scores_tensor,
detected_labels_tensor
])
detected_boxes = detected_boxes.T
tf_example.ParseFromString(serialized_example)
feature = tf_example.features.feature
feature[standard_fields.TfExampleFields.
detection_score].float_list.value[:] = detected_scores
feature[standard_fields.TfExampleFields.
detection_bbox_ymin].float_list.value[:] = detected_boxes[0]
feature[standard_fields.TfExampleFields.
detection_bbox_xmin].float_list.value[:] = detected_boxes[1]
feature[standard_fields.TfExampleFields.
detection_bbox_ymax].float_list.value[:] = detected_boxes[2]
feature[standard_fields.TfExampleFields.
detection_bbox_xmax].float_list.value[:] = detected_boxes[3]
feature[standard_fields.TfExampleFields.
detection_class_label].int64_list.value[:] = detected_classes
if discard_image_pixels:
del feature[standard_fields.TfExampleFields.image_encoded]
return tf_example
|
PyTorch/SpeechSynthesis/Tacotron2/tensorrt | tensorrt | trt_utils | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import tensorrt as trt
def is_dimension_dynamic(dim):
return dim is None or dim <= 0
def is_shape_dynamic(shape):
return any([is_dimension_dynamic(dim) for dim in shape])
def run_trt_engine(context, engine, tensors):
bindings = [None]*engine.num_bindings
for name,tensor in tensors['inputs'].items():
idx = engine.get_binding_index(name)
bindings[idx] = tensor.data_ptr()
if engine.is_shape_binding(idx) and is_shape_dynamic(context.get_shape(idx)):
context.set_shape_input(idx, tensor)
elif is_shape_dynamic(engine.get_binding_shape(idx)):
context.set_binding_shape(idx, tensor.shape)
for name,tensor in tensors['outputs'].items():
idx = engine.get_binding_index(name)
bindings[idx] = tensor.data_ptr()
context.execute_v2(bindings=bindings)
def load_engine(engine_filepath, trt_logger):
with open(engine_filepath, "rb") as f, trt.Runtime(trt_logger) as runtime:
engine = runtime.deserialize_cuda_engine(f.read())
return engine
def engine_info(engine_filepath):
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
engine = load_engine(engine_filepath, TRT_LOGGER)
binding_template = r"""
{btype} {{
name: "{bname}"
data_type: {dtype}
dims: {dims}
}}"""
type_mapping = {"DataType.HALF": "TYPE_FP16",
"DataType.FLOAT": "TYPE_FP32",
"DataType.INT32": "TYPE_INT32",
"DataType.BOOL" : "TYPE_BOOL"}
print("engine name", engine.name)
print("has_implicit_batch_dimension", engine.has_implicit_batch_dimension)
start_dim = 0 if engine.has_implicit_batch_dimension else 1
print("num_optimization_profiles", engine.num_optimization_profiles)
print("max_batch_size:", engine.max_batch_size)
print("device_memory_size:", engine.device_memory_size)
print("max_workspace_size:", engine.max_workspace_size)
print("num_layers:", engine.num_layers)
for i in range(engine.num_bindings):
btype = "input" if engine.binding_is_input(i) else "output"
bname = engine.get_binding_name(i)
dtype = engine.get_binding_dtype(i)
bdims = engine.get_binding_shape(i)
config_values = {
"btype": btype,
"bname": bname,
"dtype": type_mapping[str(dtype)],
"dims": list(bdims[start_dim:])
}
final_binding_str = binding_template.format_map(config_values)
print(final_binding_str)
def build_engine(model_file, shapes, max_ws=512*1024*1024, fp16=False):
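    # Builds a TensorRT engine from an ONNX model. `shapes` is a list of dicts with
    # keys 'name', 'min', 'opt' and 'max' describing one optimization-profile entry
    # per dynamic input; `max_ws` caps the builder workspace and `fp16` enables FP16.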
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(TRT_LOGGER)
builder.fp16_mode = fp16
config = builder.create_builder_config()
config.max_workspace_size = max_ws
if fp16:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
profile = builder.create_optimization_profile()
for s in shapes:
profile.set_shape(s['name'], min=s['min'], opt=s['opt'], max=s['max'])
config.add_optimization_profile(profile)
explicit_batch = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(explicit_batch)
with trt.OnnxParser(network, TRT_LOGGER) as parser:
with open(model_file, 'rb') as model:
parsed = parser.parse(model.read())
for i in range(parser.num_errors):
print("TensorRT ONNX parser error:", parser.get_error(i))
engine = builder.build_engine(network, config=config)
return engine
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/model_analyzer | model_analyzer | __init__ | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .model_analyzer import ModelAnalyzer, ModelAnalyzerMode, ModelAnalyzerReportMode # noqa: F401
from .model_analyzer_config import ModelAnalyzerConfig # noqa: F401
|
TensorFlow/Segmentation/UNet_Medical | UNet_Medical | README | # UNet Medical Image Segmentation for TensorFlow 1.x
This repository provides a script and recipe to train UNet Medical to achieve state of the art accuracy, and is tested and maintained by NVIDIA.
The UNet model for TensorFlow 1 is no longer maintained and will soon become unavailable. Please consider other PyTorch or TensorFlow 2 models as substitutes for your requirements.
## Table of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Inference process](#inference-process)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 40GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-40gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 40GB)](#training-performance-nvidia-dgx-a100-8x-a100-40gb)
* [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 40GB)](#inference-performance-nvidia-dgx-a100-1x-a100-40gb)
* [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
The UNet model is a convolutional neural network for 2D image segmentation. This repository contains a UNet implementation as described in the original paper [UNet: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597), without any alteration.
This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 2.2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
UNet was first introduced by Olaf Ronneberger, Philip Fischer, and Thomas Brox in the paper: [UNet: Convolutional Networks for Biomedical Image Segmentation](https://arxiv.org/abs/1505.04597). UNet allows for seamless segmentation of 2D images, with high accuracy and performance, and can be adapted to solve many different segmentation problems.
The following figure shows the construction of the UNet model and its different components. UNet is composed of a contractive and an expanding path, that aims at building a bottleneck in its centermost part through a combination of convolution and pooling operations. After this bottleneck, the image is reconstructed through a combination of convolutions and upsampling. Skip connections are added with the goal of helping the backward flow of gradients in order to improve the training.
![UNet](images/unet.png)
Figure 1. UNet architecture
### Default configuration
UNet consists of a contractive (left-side) and expanding (right-side) path. It repeatedly applies unpadded convolutions followed by max pooling for downsampling. Every step in the expanding path consists of an upsampling of the feature maps and a concatenation with the correspondingly cropped feature map from the contractive path.
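For illustration only, a minimal sketch of one contractive-path block is shown below. It is not the code used in `model/layers.py`; the function name and layer choices are merely examples of the pattern described above.

```python
import tensorflow as tf

def downsample_block(x, filters):
    # Two unpadded (VALID) 3x3 convolutions followed by 2x2 max pooling,
    # as in the contractive path described above.
    x = tf.keras.layers.Conv2D(filters, 3, padding='valid', activation='relu')(x)
    skip = tf.keras.layers.Conv2D(filters, 3, padding='valid', activation='relu')(x)
    out = tf.keras.layers.MaxPool2D(pool_size=2, strides=2)(skip)
    # `skip` is later cropped and concatenated with the corresponding
    # expanding-path feature map.
    return out, skip
```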
### Feature support matrix
The following features are supported by this model.
| **Feature** | **UNet Medical** |
|---------------------------------|-----|
| Automatic mixed precision (AMP) | Yes |
| Horovod Multi-GPU (NCCL) | Yes |
| Accelerated Linear Algebra (XLA)| Yes |
#### Features
**Automatic Mixed Precision (AMP)**
This implementation of UNet uses AMP to implement mixed precision training. It allows us to use FP16 training with FP32 master weights by modifying just a few lines of code.
**Horovod**
Horovod is a distributed training framework for TensorFlow, Keras, PyTorch, and MXNet. The goal of Horovod is to make distributed deep learning fast and easy to use. For more information about how to get started with Horovod, see the [Horovod: Official repository](https://github.com/horovod/horovod).
**Multi-GPU training with Horovod**
Our model uses Horovod to implement efficient multi-GPU training with NCCL. For details, see example sources in this repository or see the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage).
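As an illustration of the pattern (the actual integration is in `main.py` and may differ in details), a typical Horovod setup for a TF1 training script looks roughly like this:

```python
import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()  # one process per GPU

# Pin each process to a single GPU.
config = tf.ConfigProto()
config.gpu_options.visible_device_list = str(hvd.local_rank())

# Scale the learning rate by the number of workers and wrap the optimizer
# so gradients are averaged across workers with NCCL allreduce.
optimizer = tf.train.AdamOptimizer(0.0001 * hvd.size())
optimizer = hvd.DistributedOptimizer(optimizer)

# Broadcast the initial variables from rank 0 so all workers start identically.
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
```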
**XLA support (experimental)**
XLA is a domain-specific compiler for linear algebra that can accelerate TensorFlow models with potentially no source code changes. The results are improvements in speed and memory usage: most internal benchmarks run ~1.1-1.5x faster after XLA is enabled.
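The repository enables XLA through the `--xla` flag; purely as a sketch of what this typically maps to in a TF1 script (the exact mechanism used here may differ), JIT compilation can be requested at the session level:

```python
import tensorflow as tf

config = tf.ConfigProto()
# Ask TensorFlow to JIT-compile eligible subgraphs with XLA.
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
# Pass `config` as the session_config of the Estimator's RunConfig (or to a Session).
```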
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in the Volta and Turing architecture, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta and Turing GPUs automatically. The TensorFlow framework code makes all necessary model changes internally.
In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling.
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide.
#### Enabling mixed precision
Mixed precision is enabled in TensorFlow by using the Automatic Mixed Precision (TF-AMP) extension which casts variables to half-precision upon retrieval, while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In TensorFlow, loss scaling can be applied statically by using simple multiplication of loss by a constant value or automatically, by TF-AMP. Automatic mixed precision makes all the adjustments internally in TensorFlow, providing two benefits over manual operations. First, programmers need not modify network model code, reducing development and maintenance effort. Second, using AMP maintains forward and backward compatibility with all the APIs for defining and running TensorFlow models.
To enable mixed precision, you can simply add the values to the environmental variables inside your training script:
- Enable TF-AMP graph rewrite:
```
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
```
- Enable Automated Mixed Precision:
```
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
```
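Alternatively, later TF1 releases expose the same rewrite programmatically through an optimizer wrapper. The snippet below is only a sketch and is not necessarily how `main.py` enables AMP:

```python
import tensorflow as tf

optimizer = tf.train.AdamOptimizer(0.0001)
# Enables the mixed-precision graph rewrite and wraps the optimizer
# with automatic loss scaling.
optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)
```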
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
## Setup
The following section lists the requirements in order to start training the UNet Medical model.
### Requirements
This repository contains Dockerfile which extends the TensorFlow NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- TensorFlow 20.06-tf1-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-tensorflow)
- GPU-based architecture:
- [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC container registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry)
- [Running TensorFlow](https://docs.nvidia.com/deeplearning/dgx/tensorflow-release-notes/running.html#running)
For those unable to use the TensorFlow NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the UNet model on the [EM segmentation challenge dataset](http://brainiac2.mit.edu/isbi_challenge/home). These steps enable you to build the UNet TensorFlow NGC container, train and evaluate your model, and generate predictions on the test data. Furthermore, you can then choose to:
* compare your evaluation accuracy with our [Training accuracy results](#training-accuracy-results),
* compare your training performance with our [Training performance benchmark](#training-performance-benchmark),
* compare your inference performance with our [Inference performance benchmark](#inference-performance-benchmark).
For the specifics concerning training and inference, see the [Advanced](#advanced) section.
1. Clone the repository.
Executing this command will create your local repository with all the code to run UNet.
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/TensorFlow/Segmentation/UNet_Medical_TF
```
2. Build the UNet TensorFlow NGC container.
This command will use the `Dockerfile` to create a Docker image named `unet_tf`, downloading all the required components automatically.
```
docker build -t unet_tf .
```
The NGC container contains all the components optimized for usage on NVIDIA hardware.
3. Start an interactive session in the NGC container to run preprocessing/training/inference.
The following command will launch the container and mount the `./data` directory as a volume to the `/data` directory inside the container, and `./results` directory to the `/results` directory in the container.
```bash
mkdir data
mkdir results
docker run --runtime=nvidia -it --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 --rm --ipc=host -v ${PWD}/data:/data -v ${PWD}/results:/results unet_tf:latest /bin/bash
```
Any datasets and experiment results (logs, checkpoints, etc.) saved to `/data` or `/results` will be accessible
in the `./data` or `./results` directory on the host, respectively.
4. Download and preprocess the data.
The UNet script `main.py` operates on data from the [ISBI Challenge](http://brainiac2.mit.edu/isbi_challenge/home), the dataset originally employed in the [UNet paper](https://arxiv.org/abs/1505.04597). The data is available to download upon registration on the website.
   Training and test data are composed of 3 multi-page `TIF` files, each containing 30 2D-images (around 30 MB total). Once downloaded, the data can be used to run the training and benchmark scripts described below, by pointing `main.py` to its location using the `--data_dir` flag.
**Note:** Masks are only provided for training data.
5. Start training.
   After the Docker container is launched, training with the [default hyperparameters](#default-configuration) (for example, 1 or 8 GPUs, FP32 or TF-AMP) can be started with:
```bash
bash examples/unet{_TF-AMP}_{1,8}GPU.sh <path/to/dataset> <path/to/checkpoint>
```
For example, to run with full precision (FP32) on 1 GPU from the project’s folder, simply use:
```bash
bash examples/unet_1GPU.sh /data /results
```
This script will launch a training on a single fold and store the model’s checkpoint in <path/to/checkpoint> directory.
   The script can be run directly with modified flags if necessary, in particular the number of GPUs, which is specified after the `-np` flag. Since the test volume does not have labels, 20% of the training data is used for validation in a 5-fold cross-validation manner. The fold can be selected using `--crossvalidation_idx` with an integer in the range 0-4. For example, to run with 4 GPUs using fold 1, use:
```bash
horovodrun -np 4 python main.py --data_dir /data --model_dir /results --batch_size 1 --exec_mode train --crossvalidation_idx 1 --xla --amp
```
Training will result in a checkpoint file being written to `./results` on the host machine.
6. Start validation/evaluation.
The trained model can be evaluated by passing the `--exec_mode evaluate` flag. Since evaluation is carried out on a validation dataset, the `--crossvalidation_idx` parameter should be filled. For example:
```bash
python main.py --data_dir /data --model_dir /results --batch_size 1 --exec_mode evaluate --crossvalidation_idx 0 --xla --amp
```
Evaluation can also be triggered jointly after training by passing the `--exec_mode train_and_evaluate` flag.
7. Start inference/predictions.
To run inference on a checkpointed model, run:
```bash
bash examples/unet_INFER{_TF-AMP}.sh <path/to/dataset> <path/to/checkpoint>
```
For example:
```bash
bash examples/unet_INFER_FP32.sh /data /results
```
Now that you have your model trained and evaluated, you can choose to compare your training results with our [Training accuracy results](#training-accuracy-results). You can also choose to benchmark the performance of your training [Training performance benchmark](#training-performance-benchmark), or [Inference performance benchmark](#inference-performance-benchmark). Following the steps in these sections will ensure that you achieve the same accuracy and performance results as stated in the [Results](#results) section.
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
In the root directory, the most important files are:
* `main.py`: Serves as the entry point to the application.
* `Dockerfile`: Container with the basic set of dependencies to run UNet.
* `requirements.txt`: Set of extra requirements for running UNet.
The `utils/` folder encapsulates the necessary tools to train and perform inference using UNet. Its main components are:
* `cmd_util.py`: Implements the command-line arguments parsing.
* `data_loader.py`: Implements the data loading and augmentation.
* `model_fn.py`: Implements the logic for training and inference.
* `hooks/training_hook.py`: Collects different metrics during training.
* `hooks/profiling_hook.py`: Collects different metrics to be used for benchmarking and testing.
* `parse_results.py`: Implements the intermediate results parsing.
* `setup.py`: Implements helper setup functions.
The `model/` folder contains information about the building blocks of UNet and the way they are assembled. Its contents are:
* `layers.py`: Defines the different blocks that are used to assemble UNet
* `unet.py`: Defines the model architecture using the blocks from the `layers.py` script
Other folders included in the root directory are:
* `examples/`: Provides examples for training and benchmarking UNet
* `images/`: Contains a model diagram
### Parameters
The complete list of the available parameters for the main.py script contains:
* `--exec_mode`: Select the execution mode to run the model (default: `train`). Modes available:
* `train` - trains model from scratch.
* `evaluate` - loads checkpoint (if available) and performs evaluation on validation subset (requires `--crossvalidation_idx` other than `None`).
* `train_and_evaluate` - trains model from scratch and performs validation at the end (requires `--crossvalidation_idx` other than `None`).
* `predict` - loads checkpoint (if available) and runs inference on the test set. Stores the results in `--model_dir` directory.
* `train_and_predict` - trains model from scratch and performs inference.
* `--model_dir`: Set the output directory for information related to the model (default: `/results`).
* `--log_dir`: Set the output directory for logs (default: None).
* `--data_dir`: Set the input directory containing the dataset (default: `None`).
* `--batch_size`: Size of each minibatch per GPU (default: `1`).
* `--crossvalidation_idx`: Selected fold for cross-validation (default: `None`).
* `--max_steps`: Maximum number of steps (batches) for training (default: `1000`).
* `--seed`: Set random seed for reproducibility (default: `0`).
* `--weight_decay`: Weight decay coefficient (default: `0.0005`).
* `--log_every`: Log performance every n steps (default: `100`).
* `--learning_rate`: Model’s learning rate (default: `0.0001`).
* `--augment`: Enable data augmentation (default: `False`).
* `--benchmark`: Enable performance benchmarking (default: `False`). If the flag is set, the script runs in a benchmark mode - each iteration is timed and the performance result (in images per second) is printed at the end. Works for both `train` and `predict` execution modes.
* `--warmup_steps`: Used during benchmarking - the number of steps to skip (default: `200`). First iterations are usually much slower since the graph is being constructed. Skipping the initial iterations is required for a fair performance assessment.
* `--xla`: Enable accelerated linear algebra optimization (default: `False`).
* `--amp`: Enable automatic mixed precision (default: `False`).
### Command-line options
To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example:
```bash
python main.py --help
```
The following example output is printed when running the model:
```
python main.py --help
usage: main.py [-h]
[--exec_mode {train,train_and_predict,predict,evaluate,train_and_evaluate}]
[--model_dir MODEL_DIR] --data_dir DATA_DIR [--log_dir LOG_DIR]
[--batch_size BATCH_SIZE] [--learning_rate LEARNING_RATE]
[--crossvalidation_idx CROSSVALIDATION_IDX]
[--max_steps MAX_STEPS] [--weight_decay WEIGHT_DECAY]
[--log_every LOG_EVERY] [--warmup_steps WARMUP_STEPS]
[--seed SEED] [--augment] [--benchmark]
[--amp] [--xla]
UNet-medical
optional arguments:
-h, --help show this help message and exit
--exec_mode {train,train_and_predict,predict,evaluate,train_and_evaluate}
Execution mode of running the model
--model_dir MODEL_DIR
Output directory for information related to the model
--data_dir DATA_DIR Input directory containing the dataset for training
the model
--log_dir LOG_DIR Output directory for training logs
--batch_size BATCH_SIZE
Size of each minibatch per GPU
--learning_rate LEARNING_RATE
Learning rate coefficient for AdamOptimizer
--crossvalidation_idx CROSSVALIDATION_IDX
Chosen fold for cross-validation. Use None to disable
cross-validation
--max_steps MAX_STEPS
Maximum number of steps (batches) used for training
--weight_decay WEIGHT_DECAY
Weight decay coefficient
--log_every LOG_EVERY
Log performance every n steps
--warmup_steps WARMUP_STEPS
Number of warmup steps
--seed SEED Random seed
--augment Perform data augmentation during training
--benchmark Collect performance metrics during training
--amp Train using TF-AMP
--xla Train using XLA
```
### Getting the data
The UNet model uses the [EM segmentation challenge dataset](http://brainiac2.mit.edu/isbi_challenge/home). Test images provided by the organization were used to produce the resulting masks for submission. The challenge's data is made available upon registration.
Training and test data consist of three 512x512x30 `TIF` volumes (`test-volume.tif`, `train-volume.tif` and `train-labels.tif`). Files `test-volume.tif` and `train-volume.tif` contain grayscale 2D slices to be segmented. Additionally, training masks are provided in `train-labels.tif` as a 512x512x30 `TIF` volume, where each pixel has one of two classes:
* 0 indicating the presence of cellular membrane,
* 1 corresponding to background.
The objective is to produce a set of masks that segment the data as accurately as possible. The results are expected to be submitted as a 32-bit `TIF` 3D image, with values between `0` (100% membrane certainty) and `1` (100% non-membrane certainty).
#### Dataset guidelines
The training and test datasets are given as stacks of 30 2D-images provided as a multi-page `TIF` that can be read using the Pillow library and NumPy (both Python packages are installed by the `Dockerfile`).
Initially, data is loaded from a multi-page `TIF` file and converted to 512x512x30 NumPy arrays with the use of Pillow. The process of loading, normalizing and augmenting the data contained in the dataset can be found in the `data_loader.py` script.
These NumPy arrays are fed to the model through `tf.data.Dataset.from_tensor_slices()`, in order to achieve high performance.
The voxel intensities are then normalized to the interval `[-1, 1]`, whereas labels are one-hot encoded for their later use in the dice or pixel-wise cross-entropy loss, becoming 512x512x30x2 tensors.
If augmentation is enabled, the following set of augmentation techniques are applied:
* Random horizontal flipping
* Random vertical flipping
* Crop to a random dimension and resize to input dimension
* Random brightness shifting
In the end, images are reshaped to 388x388 and padded to 572x572 to fit the input of the network. Masks are only reshaped to 388x388 to fit the output of the network. Moreover, pixel intensities are clipped to the `[-1, 1]` interval.
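A simplified sketch of this preprocessing is shown below; the file name and exact steps are illustrative, and the real logic lives in `utils/data_loader.py`:

```python
import numpy as np
from PIL import Image
import tensorflow as tf

def load_multipage_tif(path):
    """Read a multi-page TIF into a (num_slices, 512, 512) float32 NumPy array."""
    img = Image.open(path)
    frames = []
    for i in range(img.n_frames):
        img.seek(i)
        frames.append(np.array(img, dtype=np.float32))
    return np.stack(frames)

volume = load_multipage_tif('train-volume.tif')       # hypothetical local path
volume = np.clip(volume / 127.5 - 1.0, -1.0, 1.0)     # normalize intensities to [-1, 1]
volume = volume[..., np.newaxis]                      # add a channel dimension
dataset = tf.data.Dataset.from_tensor_slices(volume)
# Resizing images to 388x388 and reflect-padding them to 572x572 (388 + 2 * 92)
# would then happen per element, e.g. inside dataset.map(), before batching.
```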
#### Multi-dataset
This implementation is tuned for the EM segmentation challenge dataset. Using other datasets is possible, but might require changes to the code (data loader) and tuning some hyperparameters (e.g. learning rate, number of iterations).
In the current implementation, the data loader works with NumPy arrays by loading them at initialization and passing them for training in slices via `tf.data.Dataset.from_tensor_slices()`. If you are able to fit your dataset into memory, convert the data into three NumPy arrays - training images, training labels, and testing images (optional). If your dataset is large, you will have to adapt the data loader for lazy loading of the data; a minimal sketch is shown below. For a walk-through, check the [TensorFlow tf.data API guide](https://www.tensorflow.org/guide/data_performance).
The performance of the model depends on the dataset size.
Generally, the model should scale better for datasets containing more data. For a smaller dataset, you might experience lower performance.
### Training process
The model trains for a total of 6,400 batches (6,400 / number of GPUs), with the default UNet setup:
* Adam optimizer with learning rate of 0.0001.
This default parametrization is applied when running scripts from the `./examples` directory and when running `main.py` without explicitly overriding these parameters. By default, the training is in full precision. To enable AMP, pass the `--amp` flag. AMP can be enabled for every mode of execution.
The default configuration minimizes a function _L = 1 - DICE + cross entropy_ during training.
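A schematic version of this objective is sketched below. It is not the repository's exact implementation and assumes `labels` and `logits` are tensors of shape `(batch, height, width, 2)`.
```python
import tensorflow as tf

def dice_plus_xent_loss(labels, logits, eps=1.0):
    """L = 1 - DICE + cross entropy (illustrative sketch)."""
    probs = tf.nn.softmax(logits, axis=-1)
    intersection = tf.reduce_sum(probs * labels, axis=(1, 2, 3))
    union = tf.reduce_sum(probs + labels, axis=(1, 2, 3))
    dice = tf.reduce_mean((2.0 * intersection + eps) / (union + eps))
    xent = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    return (1.0 - dice) + xent
```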
The training can be run directly without using the predefined scripts. The name of the training script is `main.py`. Because of the multi-GPU support, training should always be run with the Horovod distributed launcher like this:
```bash
horovodrun -np <number/of/gpus> python main.py --data_dir /data [other parameters]
```
*Note:* When calling the `main.py` script manually, data augmentation is disabled. In order to enable data augmentation, use the `--augment` flag in your invocation.
The main results of the training are checkpoints stored by default in `./results/` on the host machine, and in `/results` in the container. This location can be controlled
by the `--model_dir` command-line argument if a different location was mounted while starting the container. When the training is run in `train_and_predict` mode, inference will take place after the training is finished, and the inference results will be stored in the `/results` directory.
If the `--exec_mode train_and_evaluate` parameter was used, and if `--crossvalidation_idx` parameter is set to an integer value of {0, 1, 2, 3, 4}, the evaluation of the validation set takes place after the training is completed. The results of the evaluation will be printed to the console.
### Inference process
Inference can be launched with the same script used for training by passing the `--exec_mode predict` flag:
```bash
python main.py --exec_mode predict --data_dir /data --model_dir <path/to/checkpoint> [other parameters]
```
The script will then:
* Load the checkpoint from the directory specified by the `<path/to/checkpoint>` directory (`/results`),
* Run inference on the test dataset,
* Save the resulting binary masks in a `TIF` format.
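For reference, a multi-page 32-bit `TIF` volume of the kind expected for submission can be written with Pillow roughly as follows. This is a sketch under the assumption that `predictions` holds per-pixel certainties in `[0, 1]`; it is not the repository's exact output code.
```python
import numpy as np
from PIL import Image

def save_multipage_tif(volume, path):
    """Save a (depth, height, width) float volume as a multi-page 32-bit TIF."""
    pages = [Image.fromarray(page, mode="F") for page in volume.astype(np.float32)]
    pages[0].save(path, save_all=True, append_images=pages[1:])

# save_multipage_tif(predictions, "submission.tif")
```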
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark training, run one of the `TRAIN_BENCHMARK` scripts in `./examples/`:
```bash
bash examples/unet_TRAIN_BENCHMARK{_TF-AMP}_{1, 8}GPU.sh <path/to/dataset> <path/to/checkpoint> <batch/size>
```
For example, to benchmark training using mixed-precision on 8 GPUs use:
```bash
bash examples/unet_TRAIN_BENCHMARK_TF-AMP_8GPU.sh <path/to/dataset> <path/to/checkpoint> <batch/size>
```
Each of these scripts will by default run 200 warm-up iterations and benchmark the performance during training in the next 800 iterations.
To have more control, you can run the script by directly providing all relevant run parameters. For example:
```bash
horovodrun -np <num/of/gpus> python main.py --exec_mode train --benchmark --augment --data_dir <path/to/dataset> --model_dir <optional, path/to/checkpoint> --batch_size <batch/size> --warmup_steps <warm-up/steps> --max_steps <max/steps>
```
At the end of the script, a line reporting the best train throughput will be printed.
#### Inference performance benchmark
To benchmark inference, run one of the scripts in `./examples/`:
```bash
bash examples/unet_INFER_BENCHMARK{_TF-AMP}.sh <path/to/dataset> <path/to/checkpoint> <batch/size>
```
For example, to benchmark inference using mixed-precision:
```bash
bash examples/unet_INFER_BENCHMARK_TF-AMP.sh <path/to/dataset> <path/to/checkpoint> <batch/size>
```
Each of these scripts will by default run 200 warm-up iterations and benchmark the performance during inference in the next 400 iterations.
To have more control, you can run the script by directly providing all relevant run parameters. For example:
```bash
python main.py --exec_mode predict --benchmark --data_dir <path/to/dataset> --model_dir <optional, path/to/checkpoint> --batch_size <batch/size> --warmup_steps <warm-up/steps> --max_steps <max/steps>
```
At the end of the script, a line reporting the best inference throughput will be printed.
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 40GB)
The following table lists the average DICE score across 5-fold cross-validation. Our results were obtained by running the `examples/unet_TRAIN{_TF-AMP}_{1, 8}GPU.sh` training script in the `tensorflow:20.06-tf1-py3` NGC container on NVIDIA DGX A100 (8x A100 40GB) GPUs.
| GPUs | Batch size / GPU | Accuracy - TF32 | Accuracy - mixed precision | Time to train - TF32 [min] | Time to train - mixed precision [min] | Time to train speedup (TF32 to mixed precision) |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 1 | 8 | 0.8908 | 0.8910 | 22 | 10 | 2.2 |
| 8 | 8 | 0.8938 | 0.8942 | 2.6 | 2.5 | 1.04 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
The following table lists the average DICE score across 5-fold cross-validation. Our results were obtained by running the `examples/unet_TRAIN_{FP32, TF-AMP}_{1, 8}GPU.sh` training script in the `tensorflow:20.06-tf1-py3` NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs.
| GPUs | Batch size / GPU | Accuracy - FP32 | Accuracy - mixed precision | Time to train - FP32 [min] | Time to train - mixed precision [min] | Time to train speedup (FP32 to mixed precision) |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 1 | 8 | 0.8910 | 0.8903 | 48 | 19 | 2.53 |
| 8 | 8 | 0.8942 | 0.8940 | 7 | 7.5 | 0.93 |
To reproduce this result, start the Docker container interactively and run one of the TRAIN scripts:
```bash
bash examples/unet_TRAIN{_TF-AMP}_{1, 8}GPU.sh <path/to/dataset> <path/to/checkpoint> <batch/size>
```
for example
```bash
bash examples/unet_TRAIN_TF-AMP_8GPU.sh /data /results 8
```
This command will launch a script which will run 5-fold cross-validation training for 40,000 iterations and print the validation DICE score and cross-entropy loss. The time reported is for one fold, which means that the training for 5 folds will take 5 times longer. The default batch size is 8; however, if your GPU has less than 16 GB of memory and you encounter GPU memory issues, you should decrease the batch size. The logs of the runs can be found in the `/results` directory once the script is finished.
**Learning curves**
The following image shows the training loss as a function of iteration for training using DGX A100 (TF32 and TF-AMP) and DGX-1 V100 (FP32 and TF-AMP).
![LearningCurves](images/U-NetMed_TF1_conv.png)
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 40GB)
Our results were obtained by running the `examples/unet_TRAIN_BENCHMARK{_TF-AMP}_{1, 8}GPU.sh` training script in the `tensorflow:20.06-tf1-py3` NGC container on NVIDIA DGX A100 (8x A100 40GB) GPUs. Performance numbers (in images per second) were averaged over 1000 iterations, excluding the first 200 warm-up steps.
| GPUs | Batch size / GPU | Throughput - TF32 [img/s] | Throughput - mixed precision [img/s] | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision |
|:----:|:----------------:|:-------------------------:|:------------------------------------:|:-------------------------------------------:|:-------------------:|:------------------------------:|
| 1 | 1 | 29.81 | 64.22 | 2.15 | - | - |
| 1 | 8 | 40.50 | 120.08 | 2.58 | - | - |
| 8 | 1 | 169.62 | 293.31 | 1.73 | 5.69 | 4.57 |
| 8 | 8 | 304.64 | 738.64 | 2.42 | 6.55 | 6.15 |
##### Training performance: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `examples/unet_TRAIN_BENCHMARK{_TF-AMP}_{1, 8}GPU.sh` training script in the `tensorflow:20.06-tf1-py3` NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs. Performance numbers (in images per second) were averaged over 1000 iterations, excluding the first 200 warm-up steps.
| GPUs | Batch size / GPU | Throughput - FP32 [img/s] | Throughput - mixed precision [img/s] | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision |
|:----:|:----------------:|:-------------------------:|:------------------------------------:|:-------------------------------------------:|:-------------------:|:------------------------------:|
| 1 | 1 | 15.70 | 39.62 | 2.52 | - | - |
| 1 | 8 | 18.85 | 60.28 | 3.20 | - | - |
| 8 | 1 | 102.52 | 212.51 | 2.07 | 6.53 | 5.36 |
| 8 | 8 | 141.75 | 403.88 | 2.85 | 7.52 | 6.70 |
To achieve these same results, follow the steps in the [Training performance benchmark](#training-performance-benchmark) section.
Throughput is reported in images per second. Latency is reported in milliseconds per image.
#### Inference performance results
##### Inference performance: NVIDIA DGX A100 (1x A100 40GB)
Our results were obtained by running the `examples/unet_INFER_BENCHMARK{_TF-AMP}.sh` inferencing benchmarking script in the `tensorflow:20.06-tf1-py3` NGC container on NVIDIA DGX A100 (1x A100 40GB) GPU.
FP16
| Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:----------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1 | 572x572x1 | 251.11 | 3.983 | 3.990 | 3.991 | 3.993 |
| 2 | 572x572x1 | 179.70 | 11.130 | 11.138 | 11.139 | 11.142 |
| 4 | 572x572x1 | 197.53 | 20.250 | 20.260 | 20.262 | 20.266 |
| 8 | 572x572x1 | 382.48 | 24.050 | 29.356 | 30.372 | 32.359 |
| 16 | 572x572x1 | 400.58 | 45.759 | 55.615 | 57.502 | 61.192 |
TF32
| Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:----------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1 | 572x572x1 | 88.80 | 11.261 | 11.264 | 11.264 | 11.265 |
| 2 | 572x572x1 | 104.62 | 19.120 | 19.149 | 19.155 | 19.166 |
| 4 | 572x572x1 | 117.02 | 34.184 | 34.217 | 34.223 | 34.235 |
| 8 | 572x572x1 | 131.54 | 65.094 | 72.577 | 74.009 | 76.811 |
| 16 | 572x572x1 | 137.41 | 121.552 | 130.795 | 132.565 | 136.027 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
Our results were obtained by running the `examples/unet_INFER_BENCHMARK{_TF-AMP}.sh` inferencing benchmarking script in the `tensorflow:20.06-tf1-py3` NGC container on NVIDIA DGX-1 with (1x V100 16GB) GPU.
FP16
| Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:----------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1 | 572x572x1 | 127.11 | 7.868 | 7.875 | 7.876 | 7.879 |
| 2 | 572x572x1 | 140.32 | 14.256 | 14.278 | 14.283 | 14.291 |
| 4 | 572x572x1 | 148.28 | 26.978 | 27.005 | 27.010 | 27.020 |
| 8 | 572x572x1 | 178.28 | 48.432 | 54.613 | 55.797 | 58.111 |
| 16 | 572x572x1 | 181.94 | 94.812 | 106.743 | 109.028 | 113.496 |
FP32
| Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:----------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 1 | 572x572x1 | 47.32 | 21.133 | 21.155 | 21.159 | 21.167 |
| 2 | 572x572x1 | 51.43 | 38.888 | 38.921 | 38.927 | 38.940 |
| 4 | 572x572x1 | 53.56 | 74.692 | 74.763 | 74.777 | 74.804 |
| 8 | 572x572x1 | 54.41 | 152.733 | 163.148 | 165.142 | 169.042 |
| 16 | 572x572x1 | 67.11 | 245.775 | 259.548 | 262.186 | 267.343 |
To achieve these same results, follow the steps in the [Inference performance benchmark](#inference-performance-benchmark) section.
Throughput is reported in images per second. Latency is reported in milliseconds per batch.
## Release notes
### Changelog
April 2023
* Ceased maintenance of this model in TensorFlow1
June 2020
* Updated training and inference accuracy with A100 results
* Updated training and inference performance with A100 results
February 2020
* Updated README template
* Added cross-validation for accuracy measurements
* Changed optimizer to Adam and updated accuracy table
* Updated performance values
July 2019
* Added inference benchmark for T4
* Added inference example scripts
* Added inference benchmark measuring latency
* Added TRT/TF-TRT support
* Updated Pre-trained model on NGC registry
June 2019
* Updated README template
April 2019
* Initial release
### Known issues
There are no known issues in this release.
|
TensorFlow/Translation/GNMT | GNMT | README | # GNMT v2 For TensorFlow
This repository provides a script and recipe to train the GNMT v2 model to achieve state-of-the-art accuracy and is tested and maintained by NVIDIA.
GNMT model for TensorFlow1 is no longer maintained and will soon become unavailable. Please consider the PyTorch or TensorFlow2 models as a substitute for your requirements.
## Table Of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Training process](#training-process)
* [Inference process](#inference-process)
* [Validation process](#validation-process)
* [Translation process](#translation-process)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 40GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-40gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Training stability test](#training-stability-test)
* [Inference accuracy results](#inference-accuracy-results)
* [Inference accuracy: NVIDIA DGX-1 (8x V100 16GB)](#inference-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 40GB)](#training-performance-nvidia-dgx-a100-8x-a100-40gb)
* [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 40GB)](#inference-performance-nvidia-dgx-a100-1x-a100-40gb)
* [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
* [Inference performance: NVIDIA T4](#inference-performance-nvidia-t4)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
The GNMT v2 model is similar to the one discussed in the [Google's Neural Machine
Translation System: Bridging the Gap between Human and Machine
Translation](https://arxiv.org/abs/1609.08144) paper.
The most important difference between the two models is in the attention
mechanism. In our model, the output from the first LSTM layer of the decoder
goes into the attention module, then the re-weighted context is concatenated
with inputs to all subsequent LSTM layers in the decoder at the current
timestep.
The same attention mechanism is also implemented in the default
GNMT-like models from
[TensorFlow Neural Machine Translation Tutorial](https://github.com/tensorflow/nmt)
and
[NVIDIA OpenSeq2Seq Toolkit](https://github.com/NVIDIA/OpenSeq2Seq).
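The per-timestep data flow in the decoder can be sketched schematically as follows. This is illustrative pseudocode, not the code from `gnmt_model.py`; residual connections and dropout are omitted.
```python
import numpy as np

def decoder_step(x_t, encoder_outputs, lstm_layers, attention):
    """One decoder timestep in the GNMT v2 attention scheme (sketch).

    x_t:              embedded target token for the current timestep
    encoder_outputs:  encoder states used as the attention memory
    lstm_layers:      callables for the decoder LSTM layers
    attention:        callable(query, memory) -> re-weighted context vector
    """
    # Only the first decoder LSTM layer produces the attention query.
    hidden = lstm_layers[0](x_t)
    context = attention(hidden, encoder_outputs)

    # The context is concatenated with the input of every subsequent
    # LSTM layer at this timestep.
    for lstm in lstm_layers[1:]:
        hidden = lstm(np.concatenate([hidden, context], axis=-1))
    return hidden
```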
This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
The following image shows the GNMT model architecture:
![TrainingLoss](./img/diagram.png)
### Default configuration
The following features were implemented in this model:
* general:
* encoder and decoder are using shared embeddings
* data-parallel multi-GPU training
* dynamic loss scaling with backoff for Tensor Cores (mixed precision) training
* trained with label smoothing loss (smoothing factor 0.1)
* encoder:
* 4-layer LSTM, hidden size 1024, first layer is bidirectional, the rest are
unidirectional
* with residual connections starting from 3rd layer
* dropout is applied on input to all LSTM layers, probability of dropout is
set to 0.2
* hidden state of LSTM layers is initialized with zeros
  * weights and biases of LSTM layers are initialized with uniform (-0.1, 0.1)
distribution
* decoder:
* 4-layer unidirectional LSTM with hidden size 1024 and fully-connected
classifier
* with residual connections starting from 3rd layer
* dropout is applied on input to all LSTM layers, probability of dropout is
set to 0.2
* hidden state of LSTM layers is initialized with the last hidden state from
encoder
  * weights and biases of LSTM layers are initialized with uniform (-0.1, 0.1)
distribution
  * weights and biases of the fully-connected classifier are initialized with
uniform (-0.1, 0.1) distribution
* attention:
* normalized Bahdanau attention
* output from first LSTM layer of decoder goes into attention,
then re-weighted context is concatenated with the input to all subsequent
LSTM layers of the decoder at the current timestep
* linear transform of keys and queries is initialized with uniform (-0.1, 0.1), normalization scalar is initialized with 1.0 / sqrt(1024), normalization bias is initialized with zero
* inference:
* beam search with default beam size of 5
  * with coverage penalty and length normalization (a scoring sketch is shown
    after this list); coverage penalty factor is set to 0.1, length
    normalization factor is set to 0.6 and length normalization constant is
    set to 5.0
* de-tokenized BLEU computed by [SacreBLEU](https://github.com/awslabs/sockeye/tree/master/sockeye_contrib/sacrebleu)
* [motivation](https://github.com/awslabs/sockeye/tree/master/sockeye_contrib/sacrebleu#motivation) for choosing SacreBLEU
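A minimal sketch of how a beam-search hypothesis can be re-scored with these terms, following the formulation in the GNMT paper, is shown below. It is illustrative only; parameter names are assumptions, and this is not the exact code used in this repository.
```python
import numpy as np

def rescore_hypothesis(log_prob, target_len, attention_probs,
                       len_norm_factor=0.6, len_norm_const=5.0,
                       cov_penalty_factor=0.1):
    """Apply length normalization and coverage penalty to a hypothesis score.

    attention_probs: (source_len, target_len) attention weights accumulated
    while decoding the hypothesis.
    """
    length_penalty = ((len_norm_const + target_len) /
                      (len_norm_const + 1.0)) ** len_norm_factor
    coverage_penalty = cov_penalty_factor * np.sum(
        np.log(np.minimum(attention_probs.sum(axis=1), 1.0)))
    return log_prob / length_penalty + coverage_penalty
```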
When comparing the BLEU score, there are various tokenization approaches and
BLEU calculation methodologies; therefore, ensure you align similar metrics.
Code from this repository can be used to train a larger, 8-layer GNMT v2 model.
Our experiments show that a 4-layer model is significantly faster to train and
yields comparable accuracy on the public
[WMT16 English-German](http://www.statmt.org/wmt16/translation-task.html)
dataset. The number of LSTM layers is controlled by the `--num_layers` parameter
in the `nmt.py` script.
### Feature support matrix
The following features are supported by this model.
| **Feature** | **GNMT TF** |
|:---:|:--------:|
| Automatic Mixed Precision | yes |
#### Features
The following features are supported by this model.
* Automatic Mixed Precision (AMP) - Computation graphs can be modified by TensorFlow on runtime to support mixed precision training. Detailed explanation of mixed precision can be found in the next section.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta, Turing, and NVIDIA Ampere GPU architectures automatically. The TensorFlow framework code makes all necessary model changes internally.
In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling.
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide.
- APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
Mixed precision is enabled in TensorFlow by using the Automatic Mixed Precision (TF-AMP) extension which casts variables to half-precision upon retrieval, while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In TensorFlow, loss scaling can be applied statically by using simple multiplication of loss by a constant value or automatically, by TF-AMP. Automatic mixed precision makes all the adjustments internally in TensorFlow, providing two benefits over manual operations. First, programmers need not modify network model code, reducing development and maintenance effort. Second, using AMP maintains forward and backward compatibility with all the APIs for defining and running TensorFlow models.
To enable mixed precision, you can simply add the values to the environmental variables inside your training script:
- Enable TF-AMP graph rewrite:
```
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
```
- Enable Automated Mixed Precision:
```
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
```
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
## Setup
The following section lists the requirements that you need to meet in order to start training the GNMT v2 model.
### Requirements
This repository contains Dockerfile which extends the TensorFlow NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- [TensorFlow 20.06-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow)
- Supported GPUs:
- [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
- [Running TensorFlow](https://docs.nvidia.com/deeplearning/dgx/tensorflow-release-notes/running.html#running).
For those unable to use the TensorFlow NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or using FP32,
perform the following steps using the default parameters of the GNMT v2 model
on the WMT16 English German dataset.
For the specifics concerning training and inference, see the [Advanced](#advanced) section.
**1. Clone the repository.**
```
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/TensorFlow/Translation/GNMT
```
**2. Build the GNMT v2 TensorFlow container.**
```
bash scripts/docker/build.sh
```
**3. Start an interactive session in the NGC container to run training/inference.**
```
bash scripts/docker/interactive.sh
```
**4. Download and preprocess the dataset.**
Data will be downloaded to the `data` directory (on the host). The `data`
directory is mounted to the `/workspace/gnmt/data` location in the Docker
container.
```
bash scripts/wmt16_en_de.sh
```
**5. Start training.**
All results and logs are saved to the `results` directory (on the host) or to
the `/workspace/gnmt/results` directory (in the container). The training script
saves the checkpoint after every training epoch and after every 2000 training steps
within each epoch. You can modify the results directory using the `--output_dir`
argument.
To launch mixed precision training on 1 GPU, run:
```
python nmt.py --output_dir=results --batch_size=128 --learning_rate=5e-4 --amp
```
To launch mixed precision training on 8 GPUs, run:
```
python nmt.py --output_dir=results --batch_size=1024 --num_gpus=8 --learning_rate=2e-3 --amp
```
To launch FP32 (TF32 on NVIDIA Ampere GPUs) training on 1 GPU, run:
```
python nmt.py --output_dir=results --batch_size=128 --learning_rate=5e-4
```
To launch FP32 (TF32 on NVIDIA Ampere GPUs) training on 8 GPUs, run:
```
python nmt.py --output_dir=results --batch_size=1024 --num_gpus=8 --learning_rate=2e-3
```
**6. Start evaluation.**
The training process automatically runs evaluation and outputs the BLEU score
after each training epoch. Additionally, after the training is done, you can
manually run inference on test dataset with the checkpoint saved during the
training.
To launch mixed precision inference on 1 GPU, run:
```
python nmt.py --output_dir=results --infer_batch_size=128 --mode=infer --amp
```
To launch FP32 (TF32 on NVIDIA Ampere GPUs) inference on 1 GPU, run:
```
python nmt.py --output_dir=results --infer_batch_size=128 --mode=infer
```
**7. Start translation.**
After the training is done, you can translate custom sentences with the checkpoint saved during the training.
```bash
echo "The quick brown fox jumps over the lazy dog" >file.txt
python nmt.py --output_dir=results --mode=translate --translate-file=file.txt
cat file.txt.trans
```
```
Der schnelle braune Fuchs springt über den faulen Hund
```
## Advanced
The following sections provide greater details of the dataset, running training
and inference, and the training results.
### Scripts and sample code
In the root directory, the most important files are:
* `nmt.py`: serves as the entry point to launch the training
* `Dockerfile`: container with the basic set of dependencies to run GNMT v2
* `requirements.txt`: set of extra requirements for running GNMT v2
* `attention_wrapper.py`, `gnmt_model.py`, `model.py`: model definition
* `estimator.py`: functions for training and inference
In the `scripts` directory, the most important files are:
* `translate.py`: a wrapper around `nmt.py` for benchmarking and running inference
* `parse_log.py`: script for retrieving information in JSON format from the training log
* `wmt16_en_de.sh`: script for downloading and preprocessing the dataset
In the `scripts/docker` directory, the files are:
* `build.sh`: script for building the GNMT container
* `interactive.sh`: script for running the GNMT container interactively
### Parameters
The most useful arguments are as follows:
```
--learning_rate LEARNING_RATE
Learning rate.
--warmup_steps WARMUP_STEPS
How many steps we inverse-decay learning.
--max_train_epochs MAX_TRAIN_EPOCHS
Max number of epochs.
--target_bleu TARGET_BLEU
Target bleu.
--data_dir DATA_DIR Training/eval data directory.
--translate_file TRANSLATE_FILE
File to translate, works only with translate mode
--output_dir OUTPUT_DIR
Store log/model files.
--batch_size BATCH_SIZE
Total batch size.
--log_step_count_steps LOG_STEP_COUNT_STEPS
The frequency, in number of global steps, that the
global step and the loss will be logged during training
--num_gpus NUM_GPUS Number of gpus in each worker.
--random_seed RANDOM_SEED
Random seed (>0, set a specific seed).
--ckpt CKPT Checkpoint file to load a model for inference.
(defaults to newest checkpoint)
--infer_batch_size INFER_BATCH_SIZE
Batch size for inference mode.
--beam_width BEAM_WIDTH
beam width when using beam search decoder. If 0, use
standard decoder with greedy helper.
--amp use amp for training and inference
--mode {train_and_eval,infer,translate}
```
### Command-line options
To see the full list of available options and their descriptions, use the `-h`
or `--help` command line option, for example:
```
python nmt.py --help
```
### Getting the data
The GNMT v2 model was trained on the
[WMT16 English-German](http://www.statmt.org/wmt16/translation-task.html)
dataset and newstest2014 is used as a testing dataset.
This repository contains the `scripts/wmt16_en_de.sh` download script which
automatically downloads and preprocesses the training and test datasets. By
default, data is downloaded to the `data` directory.
Our download script is very similar to the `wmt16_en_de.sh` script from the
[tensorflow/nmt](https://github.com/tensorflow/nmt/blob/master/nmt/scripts/wmt16_en_de.sh)
repository. Our download script contains an extra preprocessing step, which
discards all pairs of sentences which can't be decoded by latin-1 encoder.
The `scripts/wmt16_en_de.sh` script uses the
[subword-nmt](https://github.com/rsennrich/subword-nmt)
package to segment text into subword units (Byte Pair Encodings - [BPE](https://en.wikipedia.org/wiki/Byte_pair_encoding)). By default, the script builds
the shared vocabulary of 32,000 tokens.
In order to test with other datasets, the scripts need to be customized accordingly.
#### Dataset guidelines
The process of downloading and preprocessing the data can be found in the
`scripts/wmt16_en_de.sh` script.
Initially, data is downloaded from [www.statmt.org](http://www.statmt.org). Then, `europarl-v7`,
`commoncrawl` and `news-commentary` corpora are concatenated to form the
training dataset, similarly `newstest2015` and `newstest2016` are concatenated
to form the validation dataset. Raw data is preprocessed with
[Moses](https://github.com/moses-smt/mosesdecoder), first by launching [Moses
tokenizer](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl)
(tokenizer breaks up text into individual words), then by launching
[clean-corpus-n.perl](https://github.com/moses-smt/mosesdecoder/blob/master/scripts/training/clean-corpus-n.perl)
which removes invalid sentences and does initial filtering by sequence length.
Second stage of preprocessing is done by launching the
`scripts/filter_dataset.py` script, which discards all pairs of sentences that
can't be decoded by latin-1 encoder.
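A filter of this kind can be sketched as follows (illustrative only; refer to `scripts/filter_dataset.py` for the actual implementation):
```python
def fits_latin1(sentence):
    """Return True if the sentence survives a latin-1 round trip."""
    try:
        sentence.encode("latin-1")
        return True
    except UnicodeEncodeError:
        return False

def filter_pairs(src_sentences, tgt_sentences):
    """Keep only sentence pairs where both sides are latin-1 compatible."""
    return [(s, t) for s, t in zip(src_sentences, tgt_sentences)
            if fits_latin1(s) and fits_latin1(t)]
```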
Third stage of preprocessing uses the
[subword-nmt](https://github.com/rsennrich/subword-nmt) package. First it
builds shared [byte pair
encoding](https://en.wikipedia.org/wiki/Byte_pair_encoding) vocabulary with
32,000 merge operations (command `subword-nmt learn-bpe`), then it applies
generated vocabulary to training, validation and test corpora (command
`subword-nmt apply-bpe`).
### Training process
The training configuration can be launched by running the `nmt.py` script.
By default, the training script saves the checkpoint after every training epoch
and after every 2000 training steps within each epoch.
Results are stored in the `results` directory.
The training script launches data-parallel training on multiple GPUs. We have
tested it with up to 8 GPUs on a single node.
After each training epoch, the script runs an evaluation and outputs a BLEU
score on the test dataset (*newstest2014*). BLEU is computed by the
[SacreBLEU](https://github.com/awslabs/sockeye/tree/master/sockeye_contrib/sacrebleu)
package. Logs from the training and evaluation are saved to the `results`
directory.
The training script automatically runs testing after each training epoch. The
results from the testing are printed to the standard output and saved to the
log files.
The summary after each training epoch is printed in the following format:
```
training time for epoch 1: 29.37 mins (2918.36 sent/sec, 139640.48 tokens/sec)
[...]
bleu is 20.50000
eval time for epoch 1: 1.57 mins (78.48 sent/sec, 4283.88 tokens/sec)
```
The BLEU score is computed on the test dataset.
Performance is reported in total sentences per second and in total tokens per
second. The performance result is averaged over an entire training epoch and
summed over all GPUs participating in the training.
To view all available options for training, run `python nmt.py --help`.
### Inference process
Validation and translation can be run by launching the `nmt.py` script, although it requires a
pre-trained model checkpoint, tokenized input (for validation), and non-tokenized input (for translation).
#### Validation process
The `nmt.py` script supports batched validation (`--mode=infer` flag). By
default, it launches beam search with beam size of 5, coverage penalty term and
length normalization term. Greedy decoding can be enabled by setting the
`--beam_width=1` flag for the `nmt.py` inference script. To control the
batch size use the `--infer_batch_size` flag.
To view all available options for validation, run `python nmt.py --help`.
#### Translation process
The `nmt.py` script supports batched translation (`--mode=translate` flag). By
default, it launches beam search with beam size of 5, coverage penalty term and
length normalization term. Greedy decoding can be enabled by setting the
`--beam_width=1` flag for the `nmt.py` prediction script. To control the
batch size use the `--infer_batch_size` flag.
The input file may contain many sentences, each on a new line. The file can be specified
by the `--translate_file <file>` flag. This script will create a new file called `<file>.trans`,
with translation of the input file.
To view all available options for translation, run `python nmt.py --help`.
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark training performance, run:
* `python nmt.py --output_dir=results --max_train_epochs=1 --num_gpus <num GPUs> --batch_size <total batch size> --amp` for mixed precision
* `python nmt.py --output_dir=results --max_train_epochs=1 --num_gpus <num GPUs> --batch_size <total batch size>` for FP32/TF32
The log file will contain training performance in the following format:
```
training time for epoch 1: 25.75 mins (3625.19 sent/sec, 173461.27 tokens/sec)
```
#### Inference performance benchmark
To benchmark inference performance, run the `scripts/translate.py` script:
* For FP32/TF32:
`python scripts/translate.py --output_dir=/path/to/trained/model --beam_width <comma separated beam widths> --infer_batch_size <comma separated batch sizes>`
* For mixed precision
`python scripts/translate.py --output_dir=/path/to/trained/model --amp --beam_width <comma separated beam widths> --infer_batch_size <comma separated batch sizes>`
The benchmark requires a checkpoint from a fully trained model.
### Results
The following sections provide details on how we achieved our performance and
accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 40GB)
Our results were obtained by running the `examples/DGXA100_{TF32,AMP}_8GPU.sh`
training script in the tensorflow-20.06-tf1-py3 NGC container
on NVIDIA DGX A100 (8x A100 40GB) GPUs.
| **GPUs** | **Batch size / GPU** |**Accuracy - mixed precision (BLEU)** | **Accuracy - TF32 (BLEU)** | **Time to train - mixed precision** | **Time to train - TF32** | **Time to train speedup (TF32 to mixed precision)** |
| --- | --- | ----- | ----- | -------- | -------- | ---- |
| 8 | 128 | 25.1 | 24.31 | 96 min | 139 min | 1.45 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `nmt.py` script in the
tensorflow-19.07-py3 NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs.
| **GPUs** | **Batch size / GPU** |**Accuracy - mixed precision (BLEU)** | **Accuracy - FP32 (BLEU)** | **Time to train - mixed precision** | **Time to train - FP32** | **Time to train speedup (FP32 to mixed precision)** |
| --- | --- | ----- | ----- | -------- | -------- | ---- |
| 1 | 128 | 24.90 | 24.84 | 763 min | 1237 min | 1.62 |
| 8 | 128 | 24.33 | 24.34 | 168 min | 237 min | 1.41 |
In the following plot, the BLEU scores after each training epoch for different
configurations are displayed.
![BLEUScore](./img/bleu_score.png)
##### Training stability test
The GNMT v2 model was trained for 6 epochs, starting from 6 different initial
random seeds. After each training epoch, the model was evaluated on the test
dataset and the BLEU score was recorded. The training was performed in the
tensorflow-20.06-tf1-py3 NGC container.
In the following tables, the BLEU scores after each training epoch for different
initial random seeds are displayed.
###### NVIDIA DGX A100 with 8 Ampere A100 40GB GPUs with TF32.
| Epoch | Average | Standard deviation | Minimum | Median | Maximum |
| ----- | ------- | ------------------ | ------- | ------ | ------- |
| 1 | 20.272 | 0.165 | 19.760 | 20.295 | 20.480 |
| 2 | 21.911 | 0.145 | 21.650 | 21.910 | 22.230 |
| 3 | 22.731 | 0.140 | 22.490 | 22.725 | 23.020 |
| 4 | 23.142 | 0.164 | 22.930 | 23.090 | 23.440 |
| 5 | 23.967 | 0.137 | 23.760 | 23.940 | 24.260 |
| 6 | 24.358 | 0.143 | 24.120 | 24.360 | 24.610 |
###### NVIDIA DGX-1 with 8 Tesla V100 16GB GPUs with FP32.
| Epoch | Average | Standard deviation | Minimum | Median | Maximum |
| ----- | ------- | ------------------ | ------- | ------ | ------- |
| 1 | 20.259 | 0.225 | 19.820 | 20.300 | 20.590 |
| 2 | 21.954 | 0.194 | 21.540 | 21.955 | 22.370 |
| 3 | 22.729 | 0.150 | 22.480 | 22.695 | 23.110 |
| 4 | 23.218 | 0.210 | 22.820 | 23.225 | 23.470 |
| 5 | 23.921 | 0.114 | 23.680 | 23.910 | 24.080 |
| 6 | 24.381 | 0.131 | 24.160 | 24.375 | 24.590 |
#### Inference accuracy results
##### Inference accuracy: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `scripts/translate.py` script in the tensorflow-19.07-py3 NGC container on NVIDIA DGX-1 8x V100 16GB GPUs.
* For mixed precision: `python scripts/translate.py --output_dir=/path/to/trained/model --beam_width 1,2,5 --infer_batch_size 128 --amp`
* For FP32: `python scripts/translate.py --output_dir=/path/to/trained/model --beam_width 1,2,5 --infer_batch_size 128`
| **Batch size** | **Beam size** | **Mixed precision BLEU** | **FP32 BLEU** |
|:---:|:---:|:---:|:---:|
|128|1|23.80|23.80|
|128|2|24.58|24.59|
|128|5|25.10|25.09|
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 40GB)
Our results were obtained by running the `examples/DGXA100_{TF32,AMP}_{1,8}GPU.sh`
training script in the tensorflow-20.06-tf1-py3 NGC container
on NVIDIA DGX A100 (8x A100 40GB) GPUs.
Performance numbers (in tokens per second)
were averaged over an entire training epoch.
| **GPUs** | **Batch size / GPU** | **Throughput - mixed precision (tokens/s)** | **Throughput - TF32 (tokens/s)** | **Throughput speedup (TF32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - TF32** |
| --- | --- | ------- | ------- | ---- | ---- | ---- |
| 1 | 128 | 29 911 | 31 110 | 0.96 | 1.00 | 1.00 |
| 8 | 128 | 181 384 | 175 292 | 1.03 | 6.06 | 5.63 |
To achieve these same results, follow the steps in the
[Quick Start Guide](#quick-start-guide).
##### Training performance: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `nmt.py` script in the tensorflow-19.07-py3 NGC container on NVIDIA DGX-1 with 8x V100 16GB GPUs.
Performance numbers (in tokens per second) were averaged over an entire
training epoch.
| **GPUs** | **Batch size / GPU** | **Throughput - mixed precision (tokens/s)** | **Throughput - FP32 (tokens/s)** | **Throughput speedup (FP32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - FP32** |
| --- | --- | ------- | ------ | ---- | ---- | ---- |
| 1 | 128 | 23 011 | 14 106 | 1.63 | 1.00 | 1.00 |
| 8 | 128 | 138 106 | 93 688 | 1.47 | 6.00 | 6.64 |
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.
#### Inference performance results
The benchmark requires a checkpoint from a fully trained model.
To launch the inference benchmark in mixed precision on 1 GPU, run:
```
python scripts/translate.py --output_dir=/path/to/trained/model --beam_width 1,2,5 --infer_batch_size 1,2,4,8,32,128,512 --amp
```
To launch the inference benchmark in FP32/TF32 on 1 GPU, run:
```
python scripts/translate.py --output_dir=/path/to/trained/model --beam_width 1,2,5 --infer_batch_size 1,2,4,8,32,128,512
```
To achieve these same results, follow the [Quick Start Guide](#quick-start-guide)
outlined above.
##### Inference performance: NVIDIA DGX A100 (1x A100 40GB)
Our results were obtained by running the
`python scripts/translate.py --infer_batch_size 1,2,4,8,32,128,512 --beam_width 1,2,5 {--amp}`
inferencing benchmarking script in the tensorflow-20.06-tf1-py3 NGC container
on NVIDIA DGX A100 (1x A100 40GB) GPU.
FP16
| **Batch size** | **Beam width** | **Bleu** | **Sentences/sec** | **Tokens/sec** | **Latency Avg** | **Latency 50%** | **Latency 90%** | **Latency 95%** | **Latency 99%** | **Latency 100%** |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 1 | 1 | 23.80 | 13.67 | 737.89 | 73.15 | 67.69 | 121.98 | 137.20 | 162.74 | 201.06 |
| 1 | 2 | 24.58 | 13.40 | 721.18 | 74.65 | 69.12 | 123.99 | 138.82 | 169.58 | 198.49 |
| 1 | 5 | 25.10 | 12.12 | 647.78 | 82.53 | 76.53 | 136.35 | 152.59 | 196.09 | 216.55 |
| 2 | 1 | 23.80 | 21.55 | 1163.16 | 92.82 | 88.15 | 139.88 | 152.49 | 185.18 | 208.35 |
| 2 | 2 | 24.58 | 21.07 | 1134.42 | 94.91 | 89.62 | 142.08 | 158.12 | 188.00 | 205.08 |
| 2 | 5 | 25.10 | 19.59 | 1047.21 | 102.10 | 96.20 | 152.36 | 172.46 | 211.96 | 219.87 |
| 4 | 1 | 23.80 | 36.98 | 1996.27 | 108.16 | 105.07 | 150.42 | 161.56 | 200.99 | 205.87 |
| 4 | 2 | 24.57 | 34.92 | 1880.48 | 114.53 | 111.42 | 160.29 | 177.14 | 205.32 | 211.80 |
| 4 | 5 | 25.10 | 31.56 | 1687.34 | 126.74 | 122.06 | 179.68 | 201.38 | 225.08 | 229.14 |
| 8 | 1 | 23.80 | 64.52 | 3482.81 | 123.99 | 122.89 | 159.89 | 174.66 | 201.12 | 205.59 |
| 8 | 2 | 24.57 | 59.04 | 3178.17 | 135.50 | 135.23 | 180.50 | 191.66 | 214.95 | 216.84 |
| 8 | 5 | 25.09 | 55.51 | 2967.82 | 144.11 | 141.98 | 198.39 | 218.88 | 223.55 | 225.61 |
| 32 | 1 | 23.80 | 193.54 | 10447.04 | 165.34 | 163.56 | 211.67 | 215.37 | 221.07 | 221.14 |
| 32 | 2 | 24.57 | 182.00 | 9798.09 | 175.82 | 176.04 | 220.33 | 224.25 | 226.45 | 227.05 |
| 32 | 5 | 25.10 | 141.63 | 7572.02 | 225.94 | 225.59 | 278.38 | 279.56 | 281.61 | 282.13 |
| 128 | 1 | 23.80 | 556.57 | 30042.59 | 229.98 | 226.81 | 259.05 | 260.26 | 260.74 | 260.85 |
| 128 | 2 | 24.57 | 400.02 | 21535.38 | 319.98 | 328.23 | 351.31 | 352.82 | 353.01 | 353.06 |
| 128 | 5 | 25.10 | 235.14 | 12570.95 | 544.35 | 576.62 | 581.95 | 582.64 | 583.61 | 583.85 |
| 512 | 1 | 23.80 | 903.83 | 48786.58 | 566.48 | 570.44 | 579.74 | 580.66 | 581.39 | 581.57 |
| 512 | 2 | 24.58 | 588.63 | 31689.07 | 869.81 | 894.90 | 902.65 | 902.85 | 903.00 | 903.04 |
| 512 | 5 | 25.10 | 285.86 | 15283.40 | 1791.06 | 1835.19 | 1844.29 | 1845.59 | 1846.63 | 1846.89 |
TF32
| **Batch size** | **Beam width** | **Bleu** | **Sentences/sec** | **Tokens/sec** | **Latency Avg** | **Latency 50%** | **Latency 90%** | **Latency 95%** | **Latency 99%** | **Latency 100%** |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 1 | 1 | 23.82 | 13.25 | 715.47 | 75.45 | 69.81 | 125.63 | 141.89 | 169.70 | 209.78 |
| 1 | 2 | 24.59 | 13.21 | 711.16 | 75.72 | 70.06 | 124.75 | 140.20 | 173.23 | 201.39 |
| 1 | 5 | 25.08 | 12.38 | 661.99 | 80.76 | 74.90 | 131.93 | 148.91 | 187.05 | 208.39 |
| 2 | 1 | 23.82 | 21.61 | 1166.56 | 92.55 | 87.25 | 139.54 | 151.77 | 180.24 | 209.05 |
| 2 | 2 | 24.59 | 21.24 | 1143.63 | 94.17 | 88.78 | 139.70 | 156.61 | 189.09 | 205.06 |
| 2 | 5 | 25.10 | 19.49 | 1042.17 | 102.62 | 96.14 | 153.38 | 172.89 | 213.99 | 219.54 |
| 4 | 1 | 23.81 | 35.84 | 1934.49 | 111.62 | 108.73 | 154.52 | 165.42 | 207.88 | 211.29 |
| 4 | 2 | 24.58 | 34.71 | 1869.20 | 115.24 | 111.24 | 161.24 | 177.73 | 208.12 | 212.74 |
| 4 | 5 | 25.09 | 32.24 | 1723.86 | 124.07 | 119.35 | 177.54 | 196.69 | 221.10 | 223.52 |
| 8 | 1 | 23.80 | 64.08 | 3459.74 | 124.84 | 123.61 | 161.92 | 177.06 | 205.47 | 206.47 |
| 8 | 2 | 24.61 | 59.31 | 3193.52 | 134.89 | 133.44 | 182.92 | 192.71 | 216.04 | 218.78 |
| 8 | 5 | 25.10 | 56.60 | 3026.29 | 141.35 | 138.61 | 194.52 | 213.65 | 220.24 | 221.45 |
| 32 | 1 | 23.80 | 195.31 | 10544.22 | 163.85 | 162.80 | 212.71 | 215.41 | 216.92 | 217.34 |
| 32 | 2 | 24.61 | 185.66 | 9996.59 | 172.36 | 171.07 | 216.46 | 221.64 | 223.68 | 225.25 |
| 32 | 5 | 25.11 | 147.24 | 7872.61 | 217.34 | 214.97 | 269.75 | 270.71 | 271.44 | 272.87 |
| 128 | 1 | 23.81 | 576.54 | 31123.19 | 222.02 | 219.25 | 249.44 | 249.75 | 249.88 | 249.91 |
| 128 | 2 | 24.57 | 419.87 | 22609.82 | 304.86 | 314.47 | 332.18 | 334.13 | 336.22 | 336.74 |
| 128 | 5 | 25.10 | 245.76 | 13138.84 | 520.83 | 552.68 | 558.89 | 559.09 | 559.13 | 559.13 |
| 512 | 1 | 23.80 | 966.24 | 52156.34 | 529.89 | 534.82 | 558.30 | 559.33 | 560.16 | 560.36 |
| 512 | 2 | 24.58 | 642.41 | 34590.81 | 797.00 | 812.40 | 824.23 | 825.92 | 827.27 | 827.61 |
| 512 | 5 | 25.10 | 289.33 | 15468.09 | 1769.61 | 1817.19 | 1849.83 | 1855.17 | 1859.45 | 1860.51 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
Our results were obtained by running the
`python scripts/translate.py --infer_batch_size 1,2,4,8,32,128,512 --beam_width 1,2,5 {--amp}`
inferencing benchmarking script in the tensorflow-20.06-tf1-py3 NGC container
on NVIDIA DGX-1 with (1x V100 16GB) GPU.
FP16
| **Batch size** | **Beam width** | **Bleu** | **Sentences/sec** | **Tokens/sec** | **Latency Avg** | **Latency 50%** | **Latency 90%** | **Latency 95%** | **Latency 99%** | **Latency 100%** |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 1 | 1 | 23.78 | 9.06 | 489.00 | 110.41 | 102.80 | 183.54 | 206.33 | 242.44 | 306.21 |
| 1 | 2 | 24.58 | 8.68 | 467.35 | 115.22 | 107.17 | 188.75 | 212.36 | 258.15 | 306.15 |
| 1 | 5 | 25.09 | 8.39 | 448.32 | 119.25 | 109.79 | 195.68 | 220.56 | 276.41 | 325.65 |
| 2 | 1 | 23.82 | 14.59 | 787.70 | 137.04 | 129.38 | 206.35 | 224.94 | 267.30 | 318.60 |
| 2 | 2 | 24.57 | 14.44 | 777.60 | 138.51 | 131.07 | 206.67 | 228.95 | 275.56 | 311.23 |
| 2 | 5 | 25.11 | 13.78 | 736.99 | 145.11 | 136.76 | 216.01 | 243.24 | 299.28 | 315.88 |
| 4 | 1 | 23.82 | 23.79 | 1284.24 | 168.14 | 164.13 | 234.70 | 248.42 | 308.38 | 325.46 |
| 4 | 2 | 24.59 | 22.67 | 1220.66 | 176.45 | 171.40 | 243.76 | 271.92 | 314.79 | 330.19 |
| 4 | 5 | 25.08 | 22.33 | 1194.00 | 179.12 | 174.04 | 253.36 | 281.88 | 318.76 | 340.01 |
| 8 | 1 | 23.81 | 43.33 | 2338.68 | 184.63 | 183.25 | 237.66 | 266.73 | 305.89 | 315.03 |
| 8 | 2 | 24.60 | 39.12 | 2106.44 | 204.49 | 200.96 | 276.05 | 294.53 | 327.61 | 335.50 |
| 8 | 5 | 25.10 | 37.16 | 1987.05 | 215.26 | 210.92 | 295.65 | 323.83 | 337.09 | 343.03 |
| 32 | 1 | 23.82 | 129.52 | 6992.15 | 247.06 | 245.81 | 317.71 | 325.54 | 330.09 | 335.04 |
| 32 | 2 | 24.55 | 123.28 | 6637.86 | 259.57 | 261.07 | 319.13 | 333.45 | 338.75 | 342.57 |
| 32 | 5 | 25.05 | 88.74 | 4744.33 | 360.61 | 359.27 | 446.65 | 448.40 | 455.93 | 461.86 |
| 128 | 1 | 23.80 | 332.81 | 17964.83 | 384.60 | 382.14 | 434.46 | 436.71 | 439.64 | 440.37 |
| 128 | 2 | 24.59 | 262.87 | 14153.59 | 486.93 | 506.45 | 528.87 | 530.90 | 533.09 | 533.64 |
| 128 | 5 | 25.08 | 143.91 | 7695.36 | 889.42 | 932.93 | 965.67 | 966.26 | 966.53 | 966.59 |
| 512 | 1 | 23.80 | 613.57 | 33126.42 | 834.46 | 848.06 | 868.21 | 869.04 | 869.70 | 869.86 |
| 512 | 2 | 24.59 | 387.72 | 20879.62 | 1320.54 | 1343.05 | 1354.40 | 1356.50 | 1358.19 | 1358.61 |
| 512 | 5 | 25.10 | 199.48 | 10664.34 | 2566.67 | 2628.50 | 2642.59 | 2644.73 | 2646.44 | 2646.86 |
FP32
| **Batch size** | **Beam width** | **Bleu** | **Sentences/sec** | **Tokens/sec** | **Latency Avg** | **Latency 50%** | **Latency 90%** | **Latency 95%** | **Latency 99%** | **Latency 100%** |
|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| 1 | 1 | 23.80 | 8.37 | 451.86 | 119.46 | 111.26 | 199.36 | 224.49 | 269.03 | 330.72 |
| 1 | 2 | 24.59 | 8.83 | 475.11 | 113.31 | 104.54 | 187.79 | 210.64 | 260.42 | 317.45 |
| 1 | 5 | 25.09 | 7.74 | 413.92 | 129.15 | 119.44 | 212.84 | 239.52 | 305.47 | 349.09 |
| 2 | 1 | 23.80 | 13.96 | 753.79 | 143.22 | 135.73 | 213.96 | 235.89 | 284.62 | 330.71 |
| 2 | 2 | 24.59 | 12.96 | 697.63 | 154.33 | 145.01 | 230.88 | 255.31 | 306.71 | 340.36 |
| 2 | 5 | 25.09 | 12.67 | 677.23 | 157.88 | 148.24 | 236.50 | 266.91 | 322.94 | 349.55 |
| 4 | 1 | 23.80 | 22.42 | 1209.97 | 178.44 | 172.70 | 247.51 | 266.07 | 326.95 | 343.86 |
| 4 | 2 | 24.59 | 20.55 | 1106.07 | 194.68 | 188.83 | 271.75 | 295.08 | 345.76 | 364.00 |
| 4 | 5 | 25.09 | 21.19 | 1132.58 | 188.81 | 182.77 | 268.18 | 298.53 | 331.96 | 357.36 |
| 8 | 1 | 23.80 | 39.32 | 2122.26 | 203.48 | 201.89 | 263.28 | 286.71 | 332.70 | 348.93 |
| 8 | 2 | 24.59 | 37.51 | 2019.43 | 213.26 | 211.55 | 283.67 | 302.28 | 338.47 | 356.51 |
| 8 | 5 | 25.09 | 31.69 | 1694.02 | 252.46 | 245.33 | 348.95 | 378.16 | 392.72 | 401.73 |
| 32 | 1 | 23.80 | 118.51 | 6396.93 | 270.02 | 269.22 | 337.17 | 352.12 | 361.36 | 361.40 |
| 32 | 2 | 24.59 | 100.23 | 5395.33 | 319.28 | 318.89 | 399.80 | 403.12 | 414.51 | 423.41 |
| 32 | 5 | 25.09 | 68.59 | 3666.77 | 466.55 | 466.84 | 581.77 | 586.42 | 589.04 | 593.41 |
| 128 | 1 | 23.80 | 256.49 | 13845.09 | 499.04 | 492.36 | 562.12 | 567.20 | 571.18 | 572.18 |
| 128 | 2 | 24.59 | 176.83 | 9519.12 | 723.86 | 754.89 | 792.12 | 793.86 | 796.44 | 797.09 |
| 128 | 5 | 25.09 | 96.21 | 5143.17 | 1330.48 | 1420.94 | 1427.91 | 1431.02 | 1435.23 | 1436.28 |
| 512 | 1 | 23.80 | 366.07 | 19759.97 | 1398.63 | 1421.81 | 1457.81 | 1461.04 | 1463.63 | 1464.27 |
| 512 | 2 | 24.59 | 225.48 | 12137.77 | 2270.75 | 2323.62 | 2338.62 | 2340.94 | 2342.80 | 2343.27 |
| 512 | 5 | 25.09 | 106.02 | 5667.78 | 4829.31 | 4946.65 | 4956.15 | 4957.85 | 4959.21 | 4959.55 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA T4
Our results were obtained by running the `scripts/translate.py` script in the tensorflow-19.07-py3 NGC container on NVIDIA T4.
Reported mixed precision speedups are relative to FP32 numbers for corresponding configuration.
| **Batch size** | **Beam size** | **Mixed precision tokens/s** | **Speedup** | **Mixed precision average latency (ms)** | **Average latency speedup** | **Mixed precision latency 50% (ms)** | **Latency 50% speedup** | **Mixed precision latency 90% (ms)** | **Latency 90% speedup** | **Mixed precision latency 95% (ms)** | **Latency 95% speedup** | **Mixed precision latency 99% (ms)** | **Latency 99% speedup** | **Mixed precision latency 100% (ms)** | **Latency 100% speedup** |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| 1 | 1 | 643 | 1.278 | 84 | 1.278 | 78 | 1.279 | 138 | 1.309 | 154 | 1.312 | 180 | 1.304 | 220 | 1.296 |
| 1 | 2 | 584 | 1.693 | 92 | 1.692 | 86 | 1.686 | 150 | 1.743 | 168 | 1.737 | 201 | 1.770 | 236 | 1.742 |
| 1 | 5 | 552 | 1.702 | 97 | 1.701 | 90 | 1.696 | 158 | 1.746 | 176 | 1.738 | 218 | 1.769 | 244 | 1.742 |
| 2 | 1 | 948 | 1.776 | 114 | 1.776 | 108 | 1.769 | 170 | 1.803 | 184 | 1.807 | 218 | 1.783 | 241 | 1.794 |
| 2 | 2 | 912 | 1.761 | 118 | 1.760 | 112 | 1.763 | 175 | 1.776 | 192 | 1.781 | 226 | 1.770 | 246 | 1.776 |
| 2 | 5 | 832 | 1.900 | 128 | 1.900 | 121 | 1.910 | 192 | 1.912 | 214 | 1.922 | 258 | 1.922 | 266 | 1.905 |
| 4 | 1 | 1596 | 1.792 | 135 | 1.792 | 132 | 1.791 | 187 | 1.799 | 197 | 1.815 | 241 | 1.784 | 245 | 1.796 |
| 4 | 2 | 1495 | 1.928 | 144 | 1.927 | 141 | 1.926 | 201 | 1.927 | 216 | 1.936 | 250 | 1.956 | 264 | 1.890 |
| 4 | 5 | 1308 | 1.702 | 164 | 1.702 | 159 | 1.702 | 230 | 1.722 | 251 | 1.742 | 283 | 1.708 | 288 | 1.699 |
| 8 | 1 | 2720 | 1.981 | 159 | 1.981 | 158 | 1.992 | 204 | 1.975 | 219 | 1.986 | 249 | 1.987 | 252 | 1.966 |
| 8 | 2 | 2554 | 1.809 | 169 | 1.808 | 168 | 1.829 | 224 | 1.797 | 237 | 1.783 | 260 | 1.807 | 262 | 1.802 |
| 8 | 5 | 1979 | 1.768 | 216 | 1.768 | 213 | 1.780 | 292 | 1.797 | 319 | 1.793 | 334 | 1.760 | 336 | 1.769 |
| 32 | 1 | 7449 | 1.775 | 232 | 1.774 | 231 | 1.777 | 292 | 1.789 | 300 | 1.760 | 301 | 1.768 | 301 | 1.768 |
| 32 | 2 | 5569 | 1.670 | 309 | 1.669 | 311 | 1.672 | 389 | 1.652 | 392 | 1.665 | 401 | 1.651 | 404 | 1.644 |
| 32 | 5 | 3079 | 1.867 | 556 | 1.867 | 555 | 1.865 | 692 | 1.858 | 695 | 1.860 | 702 | 1.847 | 703 | 1.847 |
| 128 | 1 | 12986 | 1.662 | 532 | 1.662 | 529 | 1.667 | 607 | 1.643 | 608 | 1.645 | 609 | 1.647 | 609 | 1.647 |
| 128 | 2 | 7856 | 1.734 | 878 | 1.734 | 911 | 1.755 | 966 | 1.742 | 967 | 1.741 | 968 | 1.744 | 968 | 1.744 |
| 128 | 5 | 3361 | 1.683 | 2036 | 1.682 | 2186 | 1.678 | 2210 | 1.673 | 2210 | 1.674 | 2211 | 1.674 | 2211 | 1.674 |
| 512 | 1 | 14932 | 1.825 | 1851 | 1.825 | 1889 | 1.808 | 1927 | 1.801 | 1928 | 1.800 | 1929 | 1.800 | 1930 | 1.799 |
| 512 | 2 | 8109 | 1.786 | 3400 | 1.786 | 3505 | 1.783 | 3520 | 1.782 | 3523 | 1.781 | 3525 | 1.781 | 3525 | 1.781 |
| 512 | 5 | 3370 | 1.802 | 8123 | 1.801 | 8376 | 1.798 | 8391 | 1.804 | 8394 | 1.804 | 8396 | 1.805 | 8397 | 1.805 |
## Release notes
### Changelog
1. Mar 18, 2019
* Initial release
2. June, 2019
* Performance improvements
3. June, 2020
* Updated performance tables to include A100 results
4. April 2023
* Ceased maintenance of this model in TensorFlow1
### Known issues
There are no known issues in this release.
|
PyTorch/SpeechSynthesis/FastPitch | FastPitch | README | # FastPitch 1.1 for PyTorch
This repository provides a script and recipe to train the FastPitch model to achieve state-of-the-art accuracy and is tested and maintained by NVIDIA.
## Table Of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Glossary](#glossary)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Inference process](#inference-process)
* [Example: Training a model on Mandarin Chinese](#example-training-a-model-on-mandarin-chinese)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb)
* [Expected training time](#expected-training-time)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-gpu-1x-a100-80gb)
* [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
* [Inference performance: NVIDIA T4](#inference-performance-nvidia-t4)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
[FastPitch](https://arxiv.org/abs/2006.06873) is one of two major components in a neural, text-to-speech (TTS) system:
* a mel-spectrogram generator such as [FastPitch](https://arxiv.org/abs/2006.06873) or [Tacotron 2](https://arxiv.org/abs/1712.05884), and
* a waveform synthesizer such as [WaveGlow](https://arxiv.org/abs/1811.00002) (refer to [NVIDIA example code](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2)).
Such a two-component TTS system is able to synthesize natural-sounding speech from raw transcripts.
The FastPitch model generates mel-spectrograms and predicts a pitch contour from raw input text.
In version 1.1, it does not need any pre-trained aligning model to bootstrap from.
It allows exerting additional control over the synthesized utterances, such as:
* modifying the pitch contour to control the prosody,
* increasing or decreasing the fundamental frequency in a natural-sounding way that preserves the perceived identity of the speaker,
* altering the rate of speech,
* adjusting the energy,
* specifying input as graphemes or phonemes,
* switching speakers when the model has been trained with data from multiple speakers.
Some of the capabilities of FastPitch are presented on the website with [samples](https://fastpitch.github.io/).
Speech synthesized with FastPitch has state-of-the-art quality, and does not suffer from missing/repeating phrases as Tacotron 2 does.
This is reflected in Mean Opinion Scores ([details](https://arxiv.org/abs/2006.06873)).
| Model | Mean Opinion Score (MOS) |
|:---------------|:-------------------------|
| Tacotron 2 | 3.946 ± 0.134 |
| FastPitch 1.0 | 4.080 ± 0.133 |
The current version of the model offers even higher quality, as reflected
in the pairwise preference scores ([details](https://arxiv.org/abs/2108.10447)).
| Model | Average preference |
|:---------------|:-------------------|
| FastPitch 1.0 | 0.435 ± 0.068 |
| FastPitch 1.1 | 0.565 ± 0.068 |
The FastPitch model is based on the [FastSpeech](https://arxiv.org/abs/1905.09263) model. The main differences between FastPitch and FastSpeech are that FastPitch:
* does not depend on an external aligner (Transformer TTS, Tacotron 2); in version 1.1, FastPitch aligns audio to transcriptions by itself, as in [One TTS Alignment To Rule Them All](https://arxiv.org/abs/2108.10447),
* explicitly learns to predict the pitch contour,
* uses pitch conditioning, which removes harsh-sounding artifacts and provides faster convergence,
* does not need mel-spectrograms distilled with a teacher model,
* can be trained as a multi-speaker model.
The FastPitch model is similar to [FastSpeech2](https://arxiv.org/abs/2006.04558), which has been developed concurrently. FastPitch averages pitch/energy values over input tokens, and treats energy as optional.
FastPitch is trained on a publicly
available [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/).
This model is trained with mixed precision using Tensor Cores on NVIDIA Volta, NVIDIA Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results from 2.0x to 2.7x faster than training without Tensor Cores while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
FastPitch is a fully feedforward [Transformer](#glossary) model that predicts mel-spectrograms
from raw text (Figure 1). The entire process is parallel, which means that all input letters are processed simultaneously to produce a full mel-spectrogram in a single forward pass.
<p align="center">
<img src="./img/fastpitch_model.png" alt="FastPitch model architecture" />
</p>
<p align="center">
  <em>Figure 1. Architecture of FastPitch (<a href="https://arxiv.org/abs/2006.06873">source</a>). The model is composed of a bidirectional Transformer backbone (also known as a Transformer encoder), a pitch predictor, and a duration predictor. After passing through the first *N* Transformer blocks (encoding), the signal is augmented with pitch information and discretely upsampled. Then it goes through another set of *N* Transformer blocks, with the goal of
smoothing out the upsampled signal and constructing a mel-spectrogram.
</em>
</p>
### Default configuration
The FastPitch model supports multi-GPU and mixed precision training with dynamic loss
scaling (refer to Apex code
[here](https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py)),
as well as mixed precision inference.
The following features were implemented in this model:
* data-parallel multi-GPU training,
* dynamic loss scaling with backoff for Tensor Cores (mixed precision)
training,
* gradient accumulation for reproducible results regardless of the number of GPUs.
Pitch contours and mel-spectrograms can be generated online during training.
To speed up training, they can instead be generated during the pre-processing step and read
directly from the disk during training. For more information on data pre-processing, refer to [Dataset guidelines
](#dataset-guidelines) and the [paper](https://arxiv.org/abs/2006.06873).
### Feature support matrix
The following features are supported by this model.
| Feature | FastPitch |
| :-------------------------------|----------:|
| Automatic mixed precision (AMP) | Yes |
| Distributed data parallel (DDP) | Yes |
#### Features
Automatic Mixed Precision (AMP) - This implementation uses native PyTorch AMP
implementation of mixed precision training. It allows us to use FP16 training
with FP32 master weights by modifying just a few lines of code.
DistributedDataParallel (DDP) - The model uses the native PyTorch implementation of
distributed data parallelism (`torch.nn.parallel.DistributedDataParallel`) at the module
level, which can run across multiple machines.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in NVIDIA Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in [CUDA 8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep Learning SDK.
For information about:
- How to train using mixed precision, refer to the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, refer to the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- APEX tools for mixed precision training, refer to the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
For training and inference, mixed precision can be enabled by adding the `--amp` flag.
Mixed precision is using [native PyTorch implementation](https://pytorch.org/blog/accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision/).
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require a high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
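If you want to compare TF32 against pure FP32 math on Ampere GPUs, one way is to disable TF32 kernels through the standard NVIDIA environment variable. The following is only a sketch and assumes the math libraries in the container honor `NVIDIA_TF32_OVERRIDE`; it is not an officially benchmarked configuration:

```bash
# Assumption: cuBLAS/cuDNN in the container honor NVIDIA_TF32_OVERRIDE
NVIDIA_TF32_OVERRIDE=0 bash scripts/train.sh
```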
### Glossary
**Character duration**
The time during which a character is being articulated. It could be measured in milliseconds, mel-spectrogram frames, and so on. Some characters are not pronounced, and thus, have 0 duration.
**Fundamental frequency**
The lowest vibration frequency of a periodic soundwave, for example, one produced by a vibrating instrument; it is perceived as the loudest. In the context of speech, it refers to the frequency of vibration of vocal cords. It is abbreviated as *f0*.
**Pitch**
A perceived frequency of vibration of music or sound.
**Transformer**
The paper [Attention Is All You Need](https://arxiv.org/abs/1706.03762) introduces a novel architecture called Transformer, which repeatedly applies the attention mechanism. It transforms one sequence into another.
## Setup
The following section lists the requirements that you need to meet in order to start training the FastPitch model.
### Requirements
This repository contains a Dockerfile that extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- [PyTorch 22.08-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch)
or newer
- supported GPUs:
- [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, refer to the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
- [Running PyTorch](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/running.html#running)
For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, refer to the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the FastPitch model on the LJSpeech 1.1 dataset. For the specifics concerning training and inference, refer to the [Advanced](#advanced) section. Pre-trained FastPitch models are available for download on [NGC](https://ngc.nvidia.com/catalog/models?query=FastPitch&quickFilter=models).
1. Clone the repository.
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/SpeechSynthesis/FastPitch
```
2. Build and run the FastPitch PyTorch NGC container.
By default, the container will use all available GPUs.
```bash
bash scripts/docker/build.sh
bash scripts/docker/interactive.sh
```
3. Download and preprocess the dataset.
Use the scripts to automatically download and preprocess the training, validation, and test datasets:
```bash
bash scripts/download_dataset.sh
bash scripts/prepare_dataset.sh
```
The data is downloaded to the `./LJSpeech-1.1` directory (on the host). The
`./LJSpeech-1.1` directory is mounted under the `/workspace/fastpitch/LJSpeech-1.1`
location in the NGC container. The complete dataset has the following structure:
```bash
./LJSpeech-1.1
├── mels # (optional) Pre-calculated target mel-spectrograms; can be calculated online
├── metadata.csv # Mapping of waveforms to utterances
├── pitch # Fundamental frequency contours for input utterances; can be calculated online
├── README
└── wavs # Raw waveforms
```
4. Start training.
```bash
bash scripts/train.sh
```
The training will produce a FastPitch model capable of generating mel-spectrograms from raw text.
It will be serialized as a single `.pt` checkpoint file, along with a series of intermediate checkpoints.
The script is configured for 8x GPU with at least 16GB of memory. Consult [Training process](#training-process) and [example configs](#training-performance-benchmark) to adjust to a different configuration or enable Automatic Mixed Precision.
5. Start validation/evaluation.
Ensure your training loss values are comparable to those listed in the table in the
[Results](#results) section. Note that the validation loss is evaluated with ground truth durations for letters (not the predicted ones). The loss values are stored in the `./output/nvlog.json` log file, `./output/{train,val,test}` as TensorBoard logs, and printed to the standard output (`stdout`) during training.
The main reported loss is a weighted sum of losses for the mel-, pitch-, and duration-predicting modules.
The audio can be generated by following the [Inference process](#inference-process) section below.
The synthesized audio should be similar to the samples in the `./audio` directory.
6. Start inference/predictions.
To synthesize audio, you will need a WaveGlow model, which generates waveforms based on mel-spectrograms generated with FastPitch. By now, a pre-trained model should have been downloaded by the `scripts/download_dataset.sh` script. Alternatively, to train WaveGlow from scratch, follow the instructions in [NVIDIA/DeepLearningExamples/Tacotron2](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2) and replace the checkpoint in the `./pretrained_models/waveglow` directory.
You can perform inference using the respective `.pt` checkpoints that are passed as `--fastpitch`
and `--waveglow` arguments:
```bash
python inference.py \
--cuda \
--fastpitch output/<FastPitch checkpoint> \
--energy-conditioning \
--waveglow pretrained_models/waveglow/<WaveGlow checkpoint> \
--wn-channels 256 \
-i phrases/devset10.tsv \
-o output/wavs_devset10
```
The speech is generated from a file passed with the `-i` argument, with one utterance per line:
```bash
`<output wav file name>|<utterance>`
```
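For illustration, a minimal sketch of such an input file could look as follows (the file names and utterances below are hypothetical and not part of the shipped `phrases/` files):
```bash
audio_0001.wav|The forecast for tonight is clear skies and light winds.
audio_0002.wav|FastPitch generates mel-spectrograms directly from raw text.
```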
To run
inference in mixed precision, use the `--amp` flag. The output audio will
be stored in the path specified by the `-o` argument. Consult the `inference.py` to learn more options, such as setting the batch size.
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
The repository holds code for FastPitch (training and inference) and WaveGlow (inference only).
The code specific to a particular model is located in that model’s directory - `./fastpitch` and `./waveglow` - and common functions live in the `./common` directory. The model-specific scripts are as follows:
* `<model_name>/model.py` - the model architecture, definition of forward and
inference functions
* `<model_name>/arg_parser.py` - argument parser for parameters specific to a
given model
* `<model_name>/data_function.py` - data loading functions
* `<model_name>/loss_function.py` - loss function for the model
In the root directory `./` of this repository, the `./train.py` script is used for
training, while inference can be executed with the `./inference.py` script. The
script `./models.py` is used to construct a model of the requested type and properties.
The repository is structured similarly to the [NVIDIA Tacotron2 Deep Learning example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2) so that they could be combined in more advanced use cases.
### Parameters
In this section, we list the most important hyperparameters and command-line arguments,
together with their default values that are used to train FastPitch.
* `--epochs` - number of epochs (default: 1000)
* `--learning-rate` - learning rate (default: 0.1)
* `--batch-size` - batch size for a single forward-backward step (default: 16)
* `--grad-accumulation` - number of steps over which gradients are accumulated (default: 2)
* `--amp` - use mixed precision training (default: disabled)
* `--load-pitch-from-disk` - pre-calculated fundamental frequency values, estimated before training, are loaded from the disk during training (default: enabled)
* `--energy-conditioning` - enables additional conditioning on energy (default: enabled)
* `--p-arpabet` - probability of choosing phonemic over graphemic representation for every word, if available (default: 1.0)
### Command-line options
To review the full list of available options and their descriptions, use the `-h`
or `--help` command-line option, for example:
```bash
python train.py --help
```
The following example output is printed when running the model:
```bash
DLL 2021-06-14 23:08:53.659718 - epoch 1 | iter 1/48 | loss 40.97 | mel loss 35.04 | kl loss 0.02240 | kl weight 0.01000 | 5730.98 frames/s | took 24.54 s | lrate 3.16e-06
DLL 2021-06-14 23:09:28.449961 - epoch 1 | iter 2/48 | loss 41.07 | mel loss 35.12 | kl loss 0.02258 | kl weight 0.01000 | 4154.18 frames/s | took 34.79 s | lrate 6.32e-06
DLL 2021-06-14 23:09:59.365398 - epoch 1 | iter 3/48 | loss 40.86 | mel loss 34.93 | kl loss 0.02252 | kl weight 0.01000 | 4589.15 frames/s | took 30.91 s | lrate 9.49e-06
```
### Getting the data
The FastPitch and WaveGlow models were trained on the LJSpeech-1.1 dataset.
The `./scripts/download_dataset.sh` script will automatically download and extract the dataset to the `./LJSpeech-1.1` directory.
#### Dataset guidelines
The LJSpeech dataset has 13,100 clips that amount to about 24 hours of speech of a single female speaker. Since the original dataset does not define a train/dev/test split of the data, we provide a split in the form of three file lists:
```bash
./filelists
├── ljs_audio_pitch_text_train_v3.txt
├── ljs_audio_pitch_text_test.txt
└── ljs_audio_pitch_text_val.txt
```
FastPitch predicts character durations just as [FastSpeech](https://arxiv.org/abs/1905.09263) does.
FastPitch 1.1 aligns input symbols to output mel-spectrogram frames automatically and does not rely
on any external aligning model. FastPitch training can now be started on raw waveforms
without any pre-processing: pitch values and mel-spectrograms will be calculated online.
For every mel-spectrogram frame, its fundamental frequency in Hz is estimated with
the Probabilistic YIN algorithm.
<p align="center">
<img src="./img/pitch.png" alt="Pitch contour estimate" />
</p>
<p align="center">
<em>Figure 2. Pitch estimates for mel-spectrogram frames of the phrase "in being comparatively"
(in blue) averaged over characters (in red). Silent letters have a duration of 0 and are omitted.</em>
</p>
#### Multi-dataset
Follow these steps to use datasets different from the default LJSpeech dataset.
1. Prepare a directory with .wav files.
```bash
./my_dataset
└── wavs
```
2. Prepare filelists with transcripts and paths to .wav files. They define the training/validation split of the data (the test is currently unused):
```bash
./filelists
├── my-dataset_audio_text_train.txt
└── my-dataset_audio_text_val.txt
```
Those filelists should list a single utterance per line as:
```bash
`<audio file path>|<transcript>`
```
The `<audio file path>` is the relative path to the path provided by the `--dataset-path` option of `train.py`.
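For illustration, assuming `--dataset-path ./my_dataset`, a single (hypothetical) filelist line could look like this:
```bash
wavs/utt_00001.wav|This is an example transcript for the first utterance.
```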
3. Run the pre-processing script to calculate pitch:
```bash
python prepare_dataset.py \
--wav-text-filelists filelists/my-dataset_audio_text_train.txt \
filelists/my-dataset_audio_text_val.txt \
--n-workers 16 \
--batch-size 1 \
--dataset-path $DATA_DIR \
--extract-pitch \
--f0-method pyin
```
4. Prepare file lists with paths to pre-calculated pitch:
```bash
./filelists
├── my-dataset_audio_pitch_text_train.txt
└── my-dataset_audio_pitch_text_val.txt
```
In order to use the prepared dataset, pass the following to the `train.py` script:
```bash
--dataset-path ./my_dataset \
--training-files ./filelists/my-dataset_audio_pitch_text_train.txt \
--validation-files ./filelists/my-dataset_audio_pitch_text_val.txt
```
### Training process
FastPitch is trained to generate mel-spectrograms from raw text input. It uses short-time Fourier transform (STFT)
to generate target mel-spectrograms from audio waveforms to be the training targets.
The training loss is averaged over an entire training epoch, whereas the
validation loss is averaged over the validation dataset. Performance is
reported in total output mel-spectrogram frames per second and recorded as `train_frames/s` (after each iteration) and `avg_train_frames/s` (averaged over epoch) in the output log file `./output/nvlog.json`.
The result is averaged over an entire training epoch and summed over all GPUs that were
included in the training.
The `scripts/train.sh` script is configured for 8x GPU with at least 16GB of memory:
```bash
--batch-size 16
--grad-accumulation 2
```
In a single accumulated step, there are `batch_size x grad_accumulation x GPUs = 256` examples being processed in parallel. With a smaller number of GPUs, increase `--grad-accumulation` to keep this relation satisfied, e.g., through env variables
```bash
NUM_GPUS=1 GRAD_ACCUMULATION=16 bash scripts/train.sh
```
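For example, to keep the same effective batch size of 256 on four GPUs with the default `--batch-size 16` from `scripts/train.sh`, a plausible configuration is (a sketch; verify against your hardware):
```bash
NUM_GPUS=4 GRAD_ACCUMULATION=4 bash scripts/train.sh
```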
### Inference process
You can run inference using the `./inference.py` script. This script takes
text as input and runs FastPitch and then WaveGlow inference to produce an
audio file. It requires pre-trained checkpoints of both models
and input text as a text file, with one phrase per line.
Pre-trained FastPitch models are available for download on [NGC](https://ngc.nvidia.com/catalog/models?query=FastPitch&quickFilter=models).
Having pre-trained models in place, run the sample inference on LJSpeech-1.1 test-set with:
```bash
bash scripts/inference_example.sh
```
Examine the `inference_example.sh` script to adjust paths to pre-trained models,
and call `python inference.py --help` to learn all available options.
By default, synthesized audio samples are saved in `./output/audio_*` folders.
FastPitch allows us to linearly adjust the rate of synthesized speech like [FastSpeech](https://arxiv.org/abs/1905.09263).
For instance, pass `--pace 0.5` for a twofold decrease in speed.
For every input character, the model predicts a pitch cue - an average pitch over a character in Hz.
Pitch can be adjusted by transforming those pitch cues. A few simple examples are provided below.
| Transformation | Flag | Samples |
| :-------------------------------------------|:------------------------------|:---------------------------------------:|
| - | - | [link](./audio/sample_fp16.wav) |
| Amplify pitch w.r.t. the mean pitch |`--pitch-transform-amplify` | [link](./audio/sample_fp16_amplify.wav) |
| Invert pitch w.r.t. the mean pitch |`--pitch-transform-invert` | [link](./audio/sample_fp16_invert.wav) |
| Raise/lower pitch by <hz> |`--pitch-transform-shift <hz>` | [link](./audio/sample_fp16_shift.wav) |
| Flatten the pitch to a constant value |`--pitch-transform-flatten` | [link](./audio/sample_fp16_flatten.wav) |
| Change the rate of speech (1.0 = unchanged) |`--pace <value>` | [link](./audio/sample_fp16_pace.wav) |
The flags can be combined. Modify these functions directly in the `inference.py` script to gain more control over the final result.
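For instance, several transformations can be combined with pacing in one call; the following is an illustrative sketch only (the checkpoint paths are placeholders):
```bash
python inference.py \
    --cuda \
    --fastpitch output/<FastPitch checkpoint> \
    --waveglow pretrained_models/waveglow/<WaveGlow checkpoint> \
    --wn-channels 256 \
    -i phrases/devset10.tsv \
    -o output/wavs_shifted_slow \
    --pitch-transform-shift 50 \
    --pace 0.75
```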
You can find all the available options by calling `python inference.py --help`.
More examples are presented on the website with [samples](https://fastpitch.github.io/).
### Example: Training a model on Mandarin Chinese
FastPitch can easily be trained or fine-tuned on datasets in various languages.
We present an example of training on the Mandarin Chinese dataset capable of pronouncing
phrases in English (for example, brand names).
For an overview of the deployment of this model in Chunghwa Telecom,
refer to the [blogpost](https://blogs.nvidia.com.tw/2022/06/20/cht-bilingual-speech-synthesis-enables-more-realistic-interactions/) (in Chinese).
1. Set up the repository and run a Docker container
Follow steps 1 and 2 of the [Quick Start Guide](#quick-start-guide).
2. Download the data
The dataset for this section has been provided by Chunghwa Telecom Laboratories
and is available for [download on NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/resources/sf_bilingual_speech_zh_en)
under the CC BY-NC 4.0 license.
The dataset can be downloaded manually after signing in to NGC as `files.zip` or `SF_bilingual.zip`, depending on the method (manual or via command line).
Afterward, it has to be pre-processed to extract pitch for training and prepare train/dev/test filelists:
```bash
pip install -r scripts/mandarin_chinese/requirements.txt
bash scripts/mandarin_chinese/prepare_dataset.sh path/to/files.zip
```
The procedure should take about half an hour. If it completes successfully,
`./data/SF_bilingual prepared successfully.` will be written to the standard output.
After pre-processing, the dataset will be located at `./data/SF_bilingual`,
and training/inference filelists at `./filelists/sf_*`.
3. Add support for textual inputs in the target language.
The model is trained end-to-end, and supporting a new language requires specifying
the input `symbol set`, `text normalization` routines,
and (optionally) grapheme-to-phoneme (G2P) conversion for phoneme-based synthesis.
Our main modifications touch the following files:
```bash
./common/text
├── symbols.py
├── text_processing.py
└── zh
├── chinese.py
├── mandarin_text_processing.py
└── pinyin_dict.txt
```
We make small changes to `symbols.py` and `text_processing.py` and keep
the crucial code in the `zh` directory.
We design our Mandarin Chinese symbol set as an extension of the English
symbol set, appending the `_mandarin_phonemes` and `_chinese_punctuation` lists to `symbols`:
```python
# common/text/symbols.py
def get_symbols(symbol_set='english_basic'):
# ...
elif symbol_set == 'english_mandarin_basic':
from .zh.chinese import chinese_punctuations, valid_symbols as mandarin_valid_symbols
# Prepend "#" to mandarin phonemes to ensure uniqueness (some are the same as uppercase letters):
_mandarin_phonemes = ['#' + s for s in mandarin_valid_symbols]
_pad = '_'
_punctuation = '!\'(),.:;? '
_chinese_punctuation = ["#" + p for p in chinese_punctuations]
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet + _mandarin_phonemes + _chinese_punctuation
```
Text normalization and G2P are performed by a `TextProcessing` instance. We implement Mandarin text processing
inside a `MandarinTextProcessing` class. For G2P, an off-the-shelf [pypinyin](https://github.com/mozillazg/python-pinyin) phonemizer and [the CMU Dictionary](http://www.speech.cs.cmu.edu/cgi-bin/cmudict) are used.
`MandarinTextProcessing` is applied to the data only if the `english_mandarin_basic` symbol set is in use:
```python
# common/text/text_processing.py
def get_text_processing(symbol_set, text_cleaners, p_arpabet):
    if symbol_set in ['english_basic', 'english_basic_lowercase', 'english_expanded']:
        return TextProcessing(symbol_set, text_cleaners, p_arpabet=p_arpabet)
    elif symbol_set == 'english_mandarin_basic':
        from common.text.zh.mandarin_text_processing import MandarinTextProcessing
        return MandarinTextProcessing(symbol_set, text_cleaners, p_arpabet=p_arpabet)
```
Note that text normalization is dependent on the target language, domain, and assumptions
on how normalized the input already is.
4. Train the model
The `SF dataset` is rather small (4.5 h compared to 24 h in `LJSpeech-1.1`).
There are numerous English phrases in the transcriptions, such as technical terms
and proper nouns. Thus, it is beneficial to initialize model weights with
a pre-trained English model from NGC, using the flag `--init-from-checkpoint`.
Note that by initializing with another model, possibly trained on a different symbol set,
we also initialize grapheme/phoneme embedding tables. For this reason, we design
the `english_mandarin_basic` symbol set as an extension of `english_basic`,
so that the same English phonemes would retain their embeddings.
In order to train, issue
```bash
NUM_GPUS=<available_gpus> GRAD_ACCUMULATION=<number> bash scripts/mandarin_chinese/train.sh
```
Adjust the variables to satisfy `$NUM_GPUS x $GRAD_ACCUMULATION = 256`.
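For example, on a single node with eight GPUs this amounts to (a sketch; adjust to your setup):
```bash
NUM_GPUS=8 GRAD_ACCUMULATION=32 bash scripts/mandarin_chinese/train.sh
```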
The model will be trained for 1000 epochs. Note that we have disabled mixed-precision
training, as we found it unstable at times on this dataset.
5. Synthesize
After training, samples can be synthesized ([audio sample](./audio/com_SF_ce1514_fastpitch_waveglow.wav)):
```bash
bash scripts/mandarin_chinese/inference.sh
```
Paths to specific checkpoints can be supplied as env variables or changed
directly in the `.sh` files.
## Performance
### Benchmarking
The following section shows how to run benchmarks measuring the model
performance in training and inference mode.
#### Training performance benchmark
To benchmark the training performance on a specific batch size, run:
* NVIDIA DGX A100 (8x A100 80GB)
```bash
AMP=true NUM_GPUS=1 BS=32 GRAD_ACCUMULATION=8 EPOCHS=10 bash scripts/train.sh
AMP=true NUM_GPUS=8 BS=32 GRAD_ACCUMULATION=1 EPOCHS=10 bash scripts/train.sh
AMP=false NUM_GPUS=1 BS=32 GRAD_ACCUMULATION=8 EPOCHS=10 bash scripts/train.sh
AMP=false NUM_GPUS=8 BS=32 GRAD_ACCUMULATION=1 EPOCHS=10 bash scripts/train.sh
```
* NVIDIA DGX-1 (8x V100 16GB)
```bash
AMP=true NUM_GPUS=1 BS=16 GRAD_ACCUMULATION=16 EPOCHS=10 bash scripts/train.sh
AMP=true NUM_GPUS=8 BS=16 GRAD_ACCUMULATION=2 EPOCHS=10 bash scripts/train.sh
AMP=false NUM_GPUS=1 BS=16 GRAD_ACCUMULATION=16 EPOCHS=10 bash scripts/train.sh
AMP=false NUM_GPUS=8 BS=16 GRAD_ACCUMULATION=2 EPOCHS=10 bash scripts/train.sh
```
Each of these scripts runs for 10 epochs, and for each epoch, measures the
average number of items per second. The performance results can be read from
the `nvlog.json` files produced by the commands.
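As a quick way to inspect the throughput after a run, you can, for example, pull the averaged metric out of the log (a sketch, assuming the default `./output/nvlog.json` location and the metric names described in the [Training process](#training-process) section):
```bash
grep "avg_train_frames/s" output/nvlog.json | tail -n 1
```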
#### Inference performance benchmark
To benchmark the inference performance on a specific batch size, run:
* For FP16
```bash
AMP=true BS_SEQUENCE="1 4 8" REPEATS=100 bash scripts/inference_benchmark.sh
```
* For FP32 or TF32
```bash
AMP=false BS_SEQUENCE="1 4 8" REPEATS=100 bash scripts/inference_benchmark.sh
```
The output log files will contain performance numbers for the FastPitch model
(number of output mel-spectrogram frames per second, reported as `generator_frames/s`)
and for WaveGlow (number of output samples per second, reported as `waveglow_samples/s`).
The `inference.py` script will run a few warm-up iterations before running the benchmark. Inference will be averaged over 100 runs, as set by the `REPEATS` env variable.
### Results
The following sections provide details on how we achieved our performance
and accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `./platform/DGXA100_FastPitch_{AMP,TF32}_8GPU.sh` training script in the PyTorch 21.05-py3 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs.
| Loss (Model/Epoch) | 50 | 250 | 500 | 750 | 1000 | 1250 | 1500 |
|:---------------------|------:|------:|------:|------:|------:|------:|------:|
| FastPitch AMP | 3.35 | 2.89 | 2.79 | 2.71 | 2.68 | 2.64 | 2.61 |
| FastPitch TF32 | 3.37 | 2.88 | 2.78 | 2.71 | 2.68 | 2.63 | 2.61 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `./platform/DGX1_FastPitch_{AMP,FP32}_8GPU.sh` training script in the PyTorch 21.05-py3 NGC container on NVIDIA DGX-1 with 8x V100 16GB GPUs.
All of the results were produced using the `train.py` script as described in the
[Training process](#training-process) section of this document.
| Loss (Model/Epoch) | 50 | 250 | 500 | 750 | 1000 | 1250 | 1500 |
|:---------------------|------:|------:|------:|------:|------:|------:|------:|
| FastPitch AMP | 3.38 | 2.88 | 2.79 | 2.71 | 2.68 | 2.64 | 2.61 |
| FastPitch FP32 | 3.38 | 2.89 | 2.80 | 2.71 | 2.68 | 2.65 | 2.62 |
<div style="text-align:center" align="center">
<img src="./img/loss.png" alt="Loss curves" />
</div>
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `./platform/DGXA100_FastPitch_{AMP,TF32}_8GPU.sh` training script in the PyTorch 22.08-py3 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs. Performance numbers, in output mel-scale spectrogram frames per second, were averaged over
an entire training epoch.
| Batch size / GPU | GPUs | Grad accumulation | Throughput - TF32 | Throughput - mixed precision | Throughput speedup (TF32 to mixed precision) | Strong scaling - TF32 | Strong scaling - mixed precision |
|-----:|--:|---:|--------:|----------:|--------:|-----:|------:|
| 128 | 1 | 2 | 141,028 | 148,149 | 1.05 | 1.00 | 1.00 |
| 64 | 4 | 1 | 525,879 | 614,857 | 1.17 | 3.73 | 4.15 |
| 32 | 8 | 1 | 914,350 | 1,022,722 | 1.12 | 6.48 | 6.90 |
###### Expected training time
The following table shows the expected training time for convergence for 1000 epochs:
| Batch size / GPU | GPUs | Grad accumulation | Time to train with TF32 (Hrs) | Time to train with mixed precision (Hrs) | Speed-up with mixed precision|
|----:|--:|--:|-----:|-----:|-----:|
| 128 | 1 | 2 | 14.5 | 13.8 | 1.05 |
| 64 | 4 | 1 | 4.1 | 3.3 | 1.17 |
| 32 | 8 | 1 | 2.2 | 2.0 | 1.12 |
##### Training performance: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `./platform/DGX1_FastPitch_{AMP,FP32}_8GPU.sh`
training script in the PyTorch 22.08-py3 NGC container on NVIDIA DGX-1 with
8x V100 16GB GPUs. Performance numbers, in output mel-scale spectrogram frames per second, were averaged over
an entire training epoch.
| Batch size / GPU | GPUs | Grad accumulation | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 to mixed precision) | Strong scaling - FP32 | Strong scaling - mixed precision |
|-----:|---:|-----:|---------:|----------:|--------:|-----:|------:|
| 16 | 1 | 16 | 31,863 | 83,761 | 2.63 | 1.00 | 1.00 |
| 16 | 4 | 4 | 117,971 | 269,143 | 2.28 | 3.70 | 3.21 |
| 16 | 8 | 2 | 225,826 | 435,799 | 1.93 | 7.09 | 5.20 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
###### Expected training time
The following table shows the expected training time for convergence for 1000 epochs:
| Batch size / GPU | GPUs | Grad accumulation | Time to train with FP32 (Hrs) | Time to train with mixed precision (Hrs) | Speed-up with mixed precision|
|---:|--:|---:|-----:|-----:|-----:|
| 16 | 1 | 16 | 64.2 | 24.4 | 2.63 |
| 16 | 4 | 4 | 17.4 | 7.6 | 2.28 |
| 16 | 8 | 2 | 9.1 | 4.7 | 1.93 |
Note that most of the quality is achieved after the initial 1000 epochs.
#### Inference performance results
The following tables show inference statistics for the FastPitch and WaveGlow
text-to-speech system, gathered from 100 inference runs. Latency is measured from the start of FastPitch inference to
the end of WaveGlow inference. Throughput is measured
as the number of generated audio samples per second at 22 kHz. RTF is the real-time factor that denotes the number of seconds of speech generated in a second of wall-clock time per input utterance.
The used WaveGlow model is a 256-channel model.
Note that performance numbers are related to the length of input. The numbers reported below were taken with a moderate length of 128 characters. Longer utterances yield higher RTF, as the generator is fully parallel.
##### Inference performance: NVIDIA DGX A100 (1x A100 80GB)
Our results were obtained by running the `./scripts/inference_benchmark.sh` inferencing benchmarking script in the PyTorch 22.08-py3 NGC container on NVIDIA DGX A100 (1x A100 80GB) GPU.
FastPitch (TorchScript, denoising)
| Batch size | Precision | Avg latency (s) | Latency tolerance interval 90% (s) | Latency tolerance interval 95% (s) | Latency tolerance interval 99% (s) | Throughput (frames/sec) | Speed-up with mixed precision | Avg RTF |
|--------------|-------------|-------------------|--------------------------------------|--------------------------------------|--------------------------------------|----------------------------|---------------------------------|-----------|
| 1 | FP16 | 0.005 | 0.006 | 0.006 | 0.006 | 120,333 | 0.97 | 1397.07 |
| 4 | FP16 | 0.006 | 0.006 | 0.006 | 0.006 | 424,053 | 1.12 | 1230.81 |
| 8 | FP16 | 0.008 | 0.010 | 0.010 | 0.011 | 669,549 | 1.12 | 971.68 |
| 1 | TF32 | 0.005 | 0.006 | 0.006 | 0.007 | 123,718 | - | 1436.37 |
| 4 | TF32 | 0.007 | 0.007 | 0.007 | 0.007 | 379,980 | - | 1102.89 |
| 8 | TF32 | 0.009 | 0.009 | 0.009 | 0.009 | 600,435 | - | 871.38 |
FastPitch + HiFi-GAN (TorchScript, denoising)
| Batch size | Precision | Avg latency (s) | Latency tolerance interval 90% (s) | Latency tolerance interval 95% (s) | Latency tolerance interval 99% (s) | Throughput (samples/sec) | Speed-up with mixed precision | Avg RTF |
|--------------|-------------|-------------------|--------------------------------------|--------------------------------------|--------------------------------------|----------------------------|---------------------------------|-----------|
| 1 | FP16 | 0.015 | 0.016 | 0.016 | 0.016 | 11,431,335 | 1.28 | 518.43 |
| 4 | FP16 | 0.038 | 0.040 | 0.040 | 0.040 | 17,670,528 | 1.42 | 200.35 |
| 8 | FP16 | 0.069 | 0.069 | 0.070 | 0.070 | 19,750,759 | 1.46 | 111.97 |
| 1 | TF32 | 0.019 | 0.020 | 0.020 | 0.020 | 8,912,296 | - | 404.19 |
| 4 | TF32 | 0.054 | 0.055 | 0.055 | 0.055 | 12,471,624 | - | 141.40 |
| 8 | TF32 | 0.100 | 0.100 | 0.100 | 0.101 | 13,543,317 | - | 76.78 |
FastPitch + WaveGlow (TorchScript, denoising)
| Batch size | Precision | Avg latency (s) | Latency tolerance interval 90% (s) | Latency tolerance interval 95% (s) | Latency tolerance interval 99% (s) | Throughput (samples/sec) | Speed-up with mixed precision | Avg RTF |
|--------------|-------------|-------------------|--------------------------------------|--------------------------------------|--------------------------------------|----------------------------|---------------------------------|-----------|
| 1 | FP16 | 0.076 | 0.077 | 0.077 | 0.078 | 2,223,336 | 1.38 | 100.83 |
| 4 | FP16 | 0.265 | 0.267 | 0.267 | 0.267 | 2,552,577 | 1.36 | 28.94 |
| 8 | FP16 | 0.515 | 0.515 | 0.516 | 0.516 | 2,630,328 | 1.37 | 14.91 |
| 1 | TF32 | 0.105 | 0.106 | 0.106 | 0.107 | 1,610,266 | - | 73.03 |
| 4 | TF32 | 0.362 | 0.363 | 0.363 | 0.363 | 1,872,327 | - | 21.23 |
| 8 | TF32 | 0.708 | 0.709 | 0.709 | 0.709 | 1,915,577 | - | 10.86 |
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
Our results were obtained by running the `./scripts/inference_benchmark.sh` script in
the PyTorch 22.08-py3 NGC container. The input utterance has 128 characters, synthesized audio has 8.05 s.
FastPitch (TorchScript, denoising)
| Batch size | Precision | Avg latency (s) | Latency tolerance interval 90% (s) | Latency tolerance interval 95% (s) | Latency tolerance interval 99% (s) | Throughput (frames/sec) | Speed-up with mixed precision | Avg RTF |
|--------------|-------------|-------------------|--------------------------------------|--------------------------------------|--------------------------------------|----------------------------|---------------------------------|-----------|
| 1 | FP16 | 0.007 | 0.008 | 0.008 | 0.008 | 88,908 | 1.10 | 1032.23 |
| 4 | FP16 | 0.010 | 0.010 | 0.010 | 0.010 | 272,564 | 1.73 | 791.12 |
| 8 | FP16 | 0.013 | 0.013 | 0.013 | 0.013 | 415,263 | 2.35 | 602.65 |
| 1 | FP32 | 0.008 | 0.008 | 0.008 | 0.009 | 80,558 | - | 935.28 |
| 4 | FP32 | 0.017 | 0.017 | 0.017 | 0.017 | 157,114 | - | 456.02 |
| 8 | FP32 | 0.030 | 0.030 | 0.030 | 0.030 | 176,754 | - | 256.51 |
FastPitch + HiFi-GAN (TorchScript, denoising)
| Batch size | Precision | Avg latency (s) | Latency tolerance interval 90% (s) | Latency tolerance interval 95% (s) | Latency tolerance interval 99% (s) | Throughput (samples/sec) | Speed-up with mixed precision | Avg RTF |
|--------------|-------------|-------------------|--------------------------------------|--------------------------------------|--------------------------------------|----------------------------|---------------------------------|-----------|
| 1 | FP16 | 0.025 | 0.025 | 0.025 | 0.025 | 6,788,274 | 2.09 | 307.86 |
| 4 | FP16 | 0.067 | 0.068 | 0.068 | 0.068 | 10,066,291 | 2.63 | 114.13 |
| 8 | FP16 | 0.123 | 0.124 | 0.124 | 0.124 | 10,992,774 | 2.78 | 62.32 |
| 1 | FP32 | 0.052 | 0.053 | 0.053 | 0.053 | 3,246,699 | - | 147.24 |
| 4 | FP32 | 0.177 | 0.178 | 0.179 | 0.179 | 3,829,018 | - | 43.41 |
| 8 | FP32 | 0.343 | 0.345 | 0.345 | 0.346 | 3,953,920 | - | 22.41 |
FastPitch + WaveGlow (TorchScript, denoising)
| Batch size | Precision | Avg latency (s) | Latency tolerance interval 90% (s) | Latency tolerance interval 95% (s) | Latency tolerance interval 99% (s) | Throughput (samples/sec) | Speed-up with mixed precision | Avg RTF |
|--------------|-------------|-------------------|--------------------------------------|--------------------------------------|--------------------------------------|----------------------------|---------------------------------|-----------|
| 1 | FP16 | 0.134 | 0.135 | 0.135 | 0.135 | 1,259,550 | 2.89 | 57.12 |
| 4 | FP16 | 0.503 | 0.504 | 0.505 | 0.505 | 1,346,145 | 2.88 | 15.26 |
| 8 | FP16 | 0.995 | 0.999 | 0.999 | 1.001 | 1,360,952 | 2.89 | 7.72 |
| 1 | FP32 | 0.389 | 0.391 | 0.392 | 0.393 | 435,564 | - | 19.75 |
| 4 | FP32 | 1.453 | 1.455 | 1.456 | 1.457 | 466,685 | - | 5.29 |
| 8 | FP32 | 2.875 | 2.879 | 2.880 | 2.882 | 471,602 | - | 2.67 |
##### Inference performance: NVIDIA T4
Our results were obtained by running the `./scripts/inference_benchmark.sh` script in
the PyTorch 22.08-py3 NGC container.
The input utterance has 128 characters, synthesized audio has 8.05 s.
FastPitch (TorchScript, denoising)
| Batch size | Precision | Avg latency (s) | Latency tolerance interval 90% (s) | Latency tolerance interval 95% (s) | Latency tolerance interval 99% (s) | Throughput (frames/sec) | Speed-up with mixed precision | Avg RTF |
|--------------|-------------|-------------------|--------------------------------------|--------------------------------------|--------------------------------------|----------------------------|---------------------------------|-----------|
| 1 | FP16 | 0.008 | 0.008 | 0.008 | 0.008 | 87,937 | 1.69 | 1020.95 |
| 4 | FP16 | 0.017 | 0.017 | 0.017 | 0.018 | 154,880 | 2.55 | 449.54 |
| 8 | FP16 | 0.029 | 0.030 | 0.030 | 0.030 | 181,776 | 2.61 | 263.80 |
| 1 | FP32 | 0.013 | 0.013 | 0.013 | 0.013 | 52,062 | - | 604.45 |
| 4 | FP32 | 0.044 | 0.045 | 0.045 | 0.045 | 60,733 | - | 176.28 |
| 8 | FP32 | 0.076 | 0.077 | 0.077 | 0.077 | 69,685 | - | 101.13 |
FastPitch + HiFi-GAN (TorchScript, denoising)
| Batch size | Precision | Avg latency (s) | Latency tolerance interval 90% (s) | Latency tolerance interval 95% (s) | Latency tolerance interval 99% (s) | Throughput (samples/sec) | Speed-up with mixed precision | Avg RTF |
|--------------|-------------|-------------------|--------------------------------------|--------------------------------------|--------------------------------------|----------------------------|---------------------------------|-----------|
| 1 | FP16 | 0.055 | 0.056 | 0.056 | 0.057 | 3,076,809 | 2.55 | 139.54 |
| 4 | FP16 | 0.201 | 0.203 | 0.204 | 0.204 | 3,360,014 | 2.67 | 38.10 |
| 8 | FP16 | 0.393 | 0.395 | 0.396 | 0.397 | 3,444,245 | 2.65 | 19.53 |
| 1 | FP32 | 0.140 | 0.142 | 0.142 | 0.142 | 1,208,678 | - | 54.82 |
| 4 | FP32 | 0.538 | 0.542 | 0.543 | 0.545 | 1,260,627 | - | 14.29 |
| 8 | FP32 | 1.045 | 1.049 | 1.050 | 1.051 | 1,297,726 | - | 7.36 |
FastPitch + WaveGlow (TorchScript, denoising)
| Batch size | Precision | Avg latency (s) | Latency tolerance interval 90% (s) | Latency tolerance interval 95% (s) | Latency tolerance interval 99% (s) | Throughput (samples/sec) | Speed-up with mixed precision | Avg RTF |
|--------------|-------------|-------------------|--------------------------------------|--------------------------------------|--------------------------------------|----------------------------|---------------------------------|-----------|
| 1 | FP16 | 0.409 | 0.411 | 0.411 | 0.412 | 414,019 | 2.65 | 18.78 |
| 4 | FP16 | 1.619 | 1.622 | 1.623 | 1.624 | 418,010 | 2.91 | 4.74 |
| 8 | FP16 | 3.214 | 3.219 | 3.220 | 3.222 | 421,148 | 2.72 | 2.39 |
| 1 | FP32 | 1.084 | 1.087 | 1.088 | 1.089 | 156,345 | - | 7.09 |
| 4 | FP32 | 4.721 | 4.735 | 4.738 | 4.743 | 143,585 | - | 1.63 |
| 8 | FP32 | 8.764 | 8.777 | 8.779 | 8.784 | 154,694 | - | 0.88 |
## Release notes
We're constantly refining and improving our performance on AI and HPC workloads even on the same hardware, with frequent updates to our software stack. For our latest performance data, refer to these pages for AI and HPC benchmarks.
### Changelog
October 2022
- Updated performance tables
July 2022
- Performance optimizations, speedups up to 1.2x (DGX-1) and 1.6x (DGX A100)
June 2022
- MHA bug fix affecting models with > 1 attention heads
August 2021
- Improved quality of synthesized audio
- Added capability to automatically align audio to transcripts during training without a pre-trained Tacotron 2 aligning model
- Added capability to train on both graphemes and phonemes
- Added conditioning on energy
- Faster training recipe
- F0 is now estimated with Probabilistic YIN (PYIN)
- Updated performance tables
- Changed version of FastPitch from 1.0 to 1.1
October 2020
- Added multispeaker capabilities
- Updated text processing module
June 2020
- Updated performance tables to include A100 results
May 2020
- Initial release
### Known issues
There are no known issues with this model.
|
PyTorch/SpeechSynthesis/FastPitch/platform | platform | DGXA100_FastPitch_AMP_8GPU | #!/bin/bash
# Export every variable assigned below so that the child process (scripts/train.sh) inherits it.
set -a
# Defaults for 8x NVIDIA A100 80GB with mixed precision (AMP); each value can be
# overridden from the environment before invoking this script.
: ${NUM_GPUS:=8}
: ${BATCH_SIZE:=32}
: ${GRAD_ACCUMULATION:=1}
: ${AMP:=true}
bash scripts/train.sh "$@"
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | trtUtils | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_TRTUTILS_H
#define TT2I_TRTUTILS_H
#include <string>
#include <vector>
// forward declare objects passed by reference
namespace nvinfer1
{
class Weights;
class Dims;
class ITensor;
class ICudaEngine;
class INetworkDefinition;
} // namespace nvinfer1
namespace tts
{
class TRTUtils
{
public:
/**
* @brief Convert a set of weights to a vector.
*
* @param weights The weights.
*
* @return The vector.
*/
static std::vector<float> toFloatVector(const nvinfer1::Weights& weights);
/**
* @brief Convert a vector to a Weights object. The vector must not have its
* underlying data free'd/move'd for the lifetime of the Weights object.
*
* @param vec The vector.
*
* @return The Weights object pointing to the vector.
*/
static nvinfer1::Weights toWeights(const std::vector<float>& vec);
/**
* @brief Create a string representation of a Dims object.
*
* @param dim The object to create a string representation of.
*
* @return The string represenetation.
*/
static std::string dimsToString(const nvinfer1::Dims& dim);
/**
* @brief Print the string representation of the Dims object to stdout.
*
* @param name The name to prefix the dimensions with.
* @param dim The dimensions.
*/
static void printDimensions(const std::string& name, const nvinfer1::Dims& dim);
/**
* @brief Print the input and output sizes of the engine to stdout.
*
* @param engine The engine to print the input/output of.
*/
static void printBindingDimensions(const nvinfer1::ICudaEngine& engine);
/**
* @brief Print the string representation of the tensor's Dims object to
* stdout.
*
* @param name The name of the tensor.
* @param tensor The tensor to print the dimensions of.
*/
static void printTensor(const std::string& name, const nvinfer1::ITensor& tensor);
/**
* @brief Get the total volume of a set of Dimensions (number of elements).
*
* @param dims The dimensions.
*
* @return The volume/total number of elements.
*/
static size_t getDimensionsSize(const nvinfer1::Dims& dims);
/**
* @brief Get the total number of elements in a tensor.
*
* @param tensor The tensor.
*
* @return The total number of elements.
*/
static size_t getTensorSize(const nvinfer1::ITensor& tensor);
/**
     * @brief Get the maximum size of an input/output tensor for a given
* binding in an engine.
*
* @param engine The engine.
* @param binding The binding name.
*
* @return The maximum size.
*/
static size_t getMaxBindingSize(const nvinfer1::ICudaEngine& engine, const char* const binding);
/**
* @brief Get the size of an input/output tensor for the given binding in an
* engine.
*
* @param engine The engine.
* @param bindingName The binding name.
*
* @return The number of elements in the binding.
*/
static size_t getBindingSize(const nvinfer1::ICudaEngine& engine, const char* const bindingName);
/**
* @brief Get the size of an input/output tensor for the given binding in an
* engine excluding the first dimension (assumes it to be explicit batch
* size).
*
* @param engine The engine.
* @param bindingName The binding name.
*
* @return The number of elements in the binding.
*/
static size_t getNonBatchBindingSize(const nvinfer1::ICudaEngine& engine, const char* const bindingName);
/**
* @brief Get the maximum batch size of the engine's first binding,
* using optimization
* profiles to determine the size if explicit batching is enabled, or
* directly querying the engine if implicit batching is used.
*
* @param engine The engine.
*
* @return The maximum batch size supported.
*/
static int getMaxBatchSize(const nvinfer1::ICudaEngine& engine);
/**
* @brief Get the size of specific dimension of an input/output tensor for the
* given binding in an engine.
*
* @param engine The engine.
* @param bindingName The binding name.
* @param dimension The dimension in the tensor to get the size of.
*
* @return The size of the dimension.
*/
static size_t getBindingDimension(
const nvinfer1::ICudaEngine& engine, const char* const bindingName, const int dimension);
/**
* @brief Get the input tensor by its name.
*
* @param network The network.
* @param inputName The tensor name.
*
* @return The input tensor.
*/
static nvinfer1::ITensor* getInputByName(nvinfer1::INetworkDefinition& network, const std::string& inputName);
/**
* @brief Get the first dimension with a non-unit size (greater than one). If
* no dimensions are greater than 1, but the number of dimensions is greater
* than 0, 1 is returned. If the number of dimensions is 0, then 0 is
* returned.
*
* @param dims The dimensions to search.
*
* @return The first non-unit dimension.
*/
static int getFirstNonUnitDim(const nvinfer1::Dims& dims);
/**
     * @brief Get a Dims object with all 1's removed down to minLength.
     * If the length of dims is less than minLength, then just dims will be
     * returned. Leading 1's will be inserted to reach minLength.
*
* @param dims The dimensions to compact.
*
* @return The compacted dimensions.
*/
static nvinfer1::Dims getCompactedDims(const nvinfer1::Dims& dims, const int minLength = 1);
};
} // namespace tts
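// Illustrative usage of TRTUtils (not part of the original header; the engine
// creation and the "input" binding name below are assumptions):
//
//   nvinfer1::ICudaEngine* engine = /* built or deserialized elsewhere */;
//   tts::TRTUtils::printBindingDimensions(*engine);
//   const size_t inputSize = tts::TRTUtils::getMaxBindingSize(*engine, "input");
//   const int maxBatchSize = tts::TRTUtils::getMaxBatchSize(*engine);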
#endif
|
TensorFlow/Detection/SSD/models/research/slim/datasets | datasets | dataset_factory | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A factory-pattern class which returns classification image/label pairs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datasets import cifar10
from datasets import flowers
from datasets import imagenet
from datasets import mnist
datasets_map = {
'cifar10': cifar10,
'flowers': flowers,
'imagenet': imagenet,
'mnist': mnist,
}
def get_dataset(name, split_name, dataset_dir, file_pattern=None, reader=None):
"""Given a dataset name and a split_name returns a Dataset.
Args:
name: String, the name of the dataset.
split_name: A train/test split name.
dataset_dir: The directory where the dataset files are stored.
file_pattern: The file pattern to use for matching the dataset source files.
reader: The subclass of tf.ReaderBase. If left as `None`, then the default
reader defined by each dataset is used.
Returns:
A `Dataset` class.
Raises:
ValueError: If the dataset `name` is unknown.
"""
if name not in datasets_map:
raise ValueError('Name of dataset unknown %s' % name)
return datasets_map[name].get_split(
split_name,
dataset_dir,
file_pattern,
reader)
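# Illustrative usage of the factory above (a sketch; it assumes the 'flowers'
# dataset has already been converted to TFRecords under /tmp/flowers with the
# slim download_and_convert tooling):
#
#   dataset = get_dataset('flowers', 'train', '/tmp/flowers')
#   # dataset.num_samples and dataset.num_classes can then be passed to the
#   # slim training/eval loops.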
|
PyTorch/SpeechSynthesis/Tacotron2 | Tacotron2 | inference_perf | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import models
import torch
import argparse
import numpy as np
import json
import time
import os
import sys
import random
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model, MeasureTime, prepare_input_sequence
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-m', '--model-name', type=str, default='',
required=True, help='Model to benchmark')
parser.add_argument('--model', type=str, default='',
help='Full path to the model checkpoint file')
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--fp16', action='store_true',
help='inference with AMP')
parser.add_argument('-bs', '--batch-size', type=int, default=1)
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save results')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--synth-data', action='store_true',
help='Test with synthetic data')
return parser
def gen_text(use_synthetic_data):
batch_size = 1
text_len = 170
if use_synthetic_data:
text_padded = torch.randint(low=0, high=148,
size=(batch_size, text_len),
dtype=torch.long).cuda()
input_lengths = torch.IntTensor([text_padded.size(1)]*
batch_size).cuda().long()
else:
text = 'The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. '*2
text = [text[:text_len]]
text_padded, input_lengths = prepare_input_sequence(text)
return (text_padded, input_lengths)
def gen_mel(use_synthetic_data, n_mel_channels, fp16):
if use_synthetic_data:
batch_size = 1
num_mels = 895
mel_padded = torch.zeros(batch_size, n_mel_channels,
num_mels).normal_(-5.62, 1.98).cuda()
else:
mel_padded = torch.load("data/mel.pt")
if fp16:
mel_padded = mel_padded.half()
return mel_padded
def main():
"""
Launches inference benchmark.
Inference is executed on a single GPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
log_file = os.path.join(args.output, args.log_file)
torch.manual_seed(1234)
random.seed(1234)
np.random.seed(1234)
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
DLLogger.metadata('infer_latency', {'unit': 's'})
DLLogger.metadata('infer_items_per_sec', {'unit': 'items/s'})
if args.synth_data:
model = load_and_setup_model(args.model_name, parser, None, args.fp16,
cpu_run=False, forward_is_infer=True)
else:
if not os.path.isfile(args.model):
print(f"File {args.model} does not exist!")
sys.exit(1)
model = load_and_setup_model(args.model_name, parser, args.model,
args.fp16, cpu_run=False,
forward_is_infer=True)
if args.model_name == "Tacotron2":
model = torch.jit.script(model)
warmup_iters = 6
num_iters = warmup_iters + 1
for i in range(num_iters):
measurements = {}
if args.model_name == 'Tacotron2':
text_padded, input_lengths = gen_text(args.synth_data)
with torch.no_grad(), MeasureTime(measurements, "inference_time"):
mels, _, _ = model(text_padded, input_lengths)
num_items = mels.size(0)*mels.size(2)
if args.model_name == 'WaveGlow':
n_mel_channels = model.upsample.in_channels
mel_padded = gen_mel(args.synth_data, n_mel_channels, args.fp16)
with torch.no_grad(), MeasureTime(measurements, "inference_time"):
audios = model(mel_padded)
audios = audios.float()
num_items = audios.size(0)*audios.size(1)
if i >= warmup_iters:
DLLogger.log(step=(i-warmup_iters,), data={"latency": measurements['inference_time']})
DLLogger.log(step=(i-warmup_iters,), data={"items_per_sec": num_items/measurements['inference_time']})
DLLogger.log(step=tuple(),
data={'infer_latency': measurements['inference_time']})
DLLogger.log(step=tuple(),
data={'infer_items_per_sec': num_items/measurements['inference_time']})
DLLogger.flush()
if __name__ == '__main__':
main()
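# Example invocations (illustrative sketch; the checkpoint path is an assumption):
#   python inference_perf.py -m Tacotron2 --model checkpoints/tacotron2.pt -o output/
#   python inference_perf.py -m WaveGlow --synth-data --fp16 -o output/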
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/bermuda | bermuda | utils | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Callable, Dict, List, Optional
import networkx as nx
from ..core import ShapeSpec
def infer_precision(
nx_graph: nx.Graph,
input_names: List[str],
output_names: List[str],
get_node_dtype_fn: Callable,
):
node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes]
node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]]
dtypes_counter = Counter(node_dtypes)
return dtypes_counter.most_common()[0][0]
def get_shapes_with_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None):
def _set_dynamic_shapes(t, shapes):
for k, v in t.items():
shape = list(v.shape)
for dim, s in enumerate(shape):
if shapes[k][dim] != -1 and shapes[k][dim] != s:
shapes[k][dim] = -1
def _mark_batch_axis(shape, batch_axis: int):
shape = list(shape)
shape[batch_axis] = -1
return tuple(shape)
## get all shapes from input and output tensors
input_shapes = {}
output_shapes = {}
for batch in dataloader:
_, x, y = batch
for k, v in x.items():
input_shapes[k] = list(v.shape)
for k, v in y.items():
output_shapes[k] = list(v.shape)
break
# based on max <max_num_iters> iterations, check which
# dimensions differ to determine dynamic_axes
max_num_iters = 100
for idx, batch in enumerate(dataloader):
if idx >= max_num_iters:
break
_, x, y = batch
_set_dynamic_shapes(x, input_shapes)
_set_dynamic_shapes(y, output_shapes)
if batch_size_dim is not None:
input_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in input_shapes.items()}
output_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in output_shapes.items()}
return input_shapes, output_shapes
def get_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None):
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim)
all_shapes = {**input_shapes, **output_shapes}
dynamic_axes = {}
for k, shape in all_shapes.items():
for idx, s in enumerate(shape):
if s == -1:
dynamic_axes[k] = {idx: k + "_" + str(idx)}
for k in all_shapes:
if k in dynamic_axes:
dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)})
else:
dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)}
return dynamic_axes
def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]:
def init_counters_and_shapes(x, counters, min_shapes, max_shapes):
for k, v in x.items():
counters[k] = Counter()
min_shapes[k] = [float("inf")] * v.ndim
max_shapes[k] = [float("-inf")] * v.ndim
counters = {}
min_shapes: Dict[str, tuple] = {}
max_shapes: Dict[str, tuple] = {}
for idx, batch in enumerate(dataloader):
ids, x, y = batch
if idx == 0:
init_counters_and_shapes(x, counters, min_shapes, max_shapes)
for k, v in x.items():
shape = v.shape
counters[k][shape] += 1
min_shapes[k] = tuple(min(a, b) for a, b in zip(min_shapes[k], shape))
max_shapes[k] = tuple(max(a, b) for a, b in zip(max_shapes[k], shape))
opt_shapes: Dict[str, tuple] = {}
for k, v in counters.items():
opt_shapes[k] = v.most_common(1)[0][0]
shapes = {}
for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes
shapes[k] = ShapeSpec(
min=(1,) + min_shapes[k][1:],
max=(max_batch_size,) + max_shapes[k][1:],
opt=(max_batch_size,) + opt_shapes[k][1:],
)
return shapes
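# Illustrative sketch of how the helpers above are typically used (the
# dataloader is any iterable yielding (ids, x, y) batches of tensor dicts):
#
#   dynamic_axes = get_dynamic_axes(dataloader, batch_size_dim=0)
#   shape_specs = get_input_shapes(dataloader, max_batch_size=64)
#   # dynamic_axes can feed torch.onnx.export(); the ShapeSpec min/opt/max
#   # values can feed TensorRT optimization profiles.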
|
PyTorch/Recommendation/NCF | NCF | test_featurespec_correctness | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from feature_spec import FeatureSpec
from neumf_constants import TEST_SAMPLES_PER_SERIES
from dataloading import TorchTensorDataset
import torch
import os
import sys
def test_matches_template(path, template_path):
loaded_featurespec_string = FeatureSpec.from_yaml(path).to_string()
loaded_template_string = FeatureSpec.from_yaml(template_path).to_string()
assert loaded_template_string == loaded_featurespec_string
def mock_args():
class Obj:
pass
args = Obj()
args.__dict__['local_rank'] = 0
return args
def test_dtypes(path):
loaded_featurespec = FeatureSpec.from_yaml(path)
features = loaded_featurespec.feature_spec
declared_dtypes = {name: data['dtype'] for name, data in features.items()}
source_spec = loaded_featurespec.source_spec
for mapping in source_spec.values():
for chunk in mapping:
chunk_dtype = None
for present_feature in chunk['features']:
assert present_feature in declared_dtypes, "unknown feature in mapping"
# Check declared type
feature_dtype = declared_dtypes[present_feature]
if chunk_dtype is None:
chunk_dtype = feature_dtype
else:
assert chunk_dtype == feature_dtype
path_to_load = os.path.join(loaded_featurespec.base_directory, chunk['files'][0])
loaded_data = torch.load(path_to_load)
assert str(loaded_data.dtype) == chunk_dtype
def test_cardinalities(path):
loaded_featurespec = FeatureSpec.from_yaml(path)
features = loaded_featurespec.feature_spec
declared_cardinalities = {name: data['cardinality'] for name, data in features.items() if 'cardinality' in data}
source_spec = loaded_featurespec.source_spec
for mapping_name, mapping in source_spec.items():
dataset = TorchTensorDataset(loaded_featurespec, mapping_name, mock_args())
for feature_name, cardinality in declared_cardinalities.items():
feature_data = dataset.features[feature_name]
biggest_num = feature_data.max().item()
assert biggest_num < cardinality
def test_samples_in_test_series(path):
loaded_featurespec = FeatureSpec.from_yaml(path)
series_length = loaded_featurespec.metadata[TEST_SAMPLES_PER_SERIES]
dataset = TorchTensorDataset(loaded_featurespec, 'test', mock_args())
for feature in dataset.features.values():
assert len(feature) % series_length == 0
if __name__ == '__main__':
tested_spec = sys.argv[1]
template = sys.argv[2]
test_cardinalities(tested_spec)
test_dtypes(tested_spec)
test_samples_in_test_series(tested_spec)
test_matches_template(tested_spec, template)
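# Example invocation (illustrative; both paths are assumptions):
#   python test_featurespec_correctness.py /data/ncf/feature_spec.yaml \
#       feature_specs/ml-20m_template.yaml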
|
TensorFlow2/Recommendation/SIM/sim/models | models | sim_model | # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import tensorflow as tf
from sim.layers.ctr_classification_mlp import CTRClassificationMLP
from sim.layers.item_item_interaction import DotItemItemInteraction
from sim.layers.item_sequence_interaction import DIENItemSequenceInteractionBlock, DINItemSequenceInteractionBlock
from sim.models.dien_model import compute_auxiliary_probs
from sim.models.sequential_recommender_model import SequentialRecommenderModel
@tf.function
def masked_temporal_mean(sequence_batch, mask):
masked_sum = tf.reduce_sum(sequence_batch * mask[:, :, None], 1)
masked_counts = tf.reduce_sum(mask, 1, keepdims=True)
return masked_sum / (masked_counts + 1.0)
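# Worked sketch of the masked mean above (shapes/values are illustrative):
#   sequence_batch: [batch, seq_len, dim], mask: [batch, seq_len] with 1s for
#   valid steps. For a row with mask [1, 1, 0], only the first two embeddings
#   enter the sum and the denominator is 2 + 1.0 (the +1.0 also guards against
#   all-zero masks), giving a slightly damped mean over the valid steps.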
class SIMModel(SequentialRecommenderModel):
def __init__(self, feature_spec, mlp_hidden_dims, embedding_dim=4, k=50, dropout_rate=-1):
super(SIMModel, self).__init__(
feature_spec, embedding_dim
)
self.k = k
self.stage_one_classifier = CTRClassificationMLP(
layer_sizes=mlp_hidden_dims["stage_1"],
dropout_rate=dropout_rate
)
self.stage_two_classifier = CTRClassificationMLP(
layer_sizes=mlp_hidden_dims["stage_2"],
dropout_rate=dropout_rate
)
self.stage_two_auxiliary_net = CTRClassificationMLP(
layer_sizes=mlp_hidden_dims["aux"],
activation_function=partial(
tf.keras.layers.Activation, activation="sigmoid"
),
dropout_rate=dropout_rate
)
self.stage_one_item_seq_interaction = DINItemSequenceInteractionBlock(
item_item_interaction=DotItemItemInteraction()
)
self.stage_two_item_seq_interaction = DIENItemSequenceInteractionBlock(
hidden_size=embedding_dim * 6
)
def select_top_k_items(self, embeddings, scores):
top_k = tf.math.top_k(scores, k=self.k)
top_k_values, top_k_indices = top_k.values, top_k.indices
top_k_mask = tf.cast(tf.greater(top_k_values, tf.zeros_like(top_k_values)), embeddings.dtype)
best_k_embeddings = tf.gather(embeddings, top_k_indices, batch_dims=1)
return best_k_embeddings, top_k_mask
@tf.function
def call(
self,
inputs,
compute_aux_loss=True,
training=False,
):
user_features = inputs["user_features"]
target_item_features = inputs["target_item_features"]
long_sequence_features = inputs["long_sequence_features"]
short_sequence_features = inputs["short_sequence_features"]
short_neg_sequence_features = inputs["short_neg_sequence_features"]
long_sequence_mask = inputs["long_sequence_mask"]
short_sequence_mask = inputs["short_sequence_mask"]
output_dict = {}
# GSU Stage
user_embedding = self.embed(user_features)
target_item_embedding = self.embed(target_item_features)
long_sequence_embeddings = self.embed(long_sequence_features)
long_sequence_embeddings = long_sequence_embeddings * tf.expand_dims(
long_sequence_mask, axis=-1
)
stage_one_interaction_embedding, gsu_scores = self.stage_one_item_seq_interaction(
(target_item_embedding, long_sequence_embeddings, long_sequence_mask)
)
# combine all the stage 1 embeddings
stage_one_embeddings = tf.concat(
[target_item_embedding, stage_one_interaction_embedding, user_embedding], -1
)
stage_one_logits = self.stage_one_classifier(
stage_one_embeddings, training=training
)
# ESU Stage
user_embedding = self.embed(user_features)
target_item_embedding = self.embed(target_item_features)
short_sequence_embeddings = self.embed(short_sequence_features)
short_sequence_embeddings = short_sequence_embeddings * tf.expand_dims(
short_sequence_mask, axis=-1
)
# ---- Attention part
# Take embeddings of k best items produced by GSU at Stage 1
best_k_long_seq_embeddings, top_k_mask = self.select_top_k_items(
long_sequence_embeddings, gsu_scores
)
# Run attention mechanism to produce a single representation
att_fea, _ = self.stage_one_item_seq_interaction(
(target_item_embedding, best_k_long_seq_embeddings, top_k_mask),
)
# Take a mean representation of best_k_long_seq_embeddings
item_his_sum_emb = masked_temporal_mean(best_k_long_seq_embeddings, top_k_mask)
# ---- DIEN part
(
stage_two_interaction_embedding,
short_features_layer_1,
) = self.stage_two_item_seq_interaction(
(target_item_embedding, short_sequence_embeddings, short_sequence_mask),
)
# Compute auxiliary logits for DIEN
if compute_aux_loss:
# Embed negative sequence features
short_neg_sequence_embeddings = self.embed(short_neg_sequence_features)
short_neg_sequence_embeddings = (
short_neg_sequence_embeddings
* tf.expand_dims(short_sequence_mask, axis=-1)
)
aux_click_probs = compute_auxiliary_probs(
self.stage_two_auxiliary_net,
short_features_layer_1,
short_sequence_embeddings,
training=training,
)
output_dict["aux_click_probs"] = aux_click_probs
aux_noclick_probs = compute_auxiliary_probs(
self.stage_two_auxiliary_net,
short_features_layer_1,
short_neg_sequence_embeddings,
training=training,
)
output_dict["aux_noclick_probs"] = aux_noclick_probs
# combine all the stage 2 embeddings
stage_two_embeddings = tf.concat(
[
att_fea,
item_his_sum_emb,
target_item_embedding,
stage_two_interaction_embedding,
user_embedding
],
-1,
)
stage_two_logits = self.stage_two_classifier(
stage_two_embeddings, training=training
)
output_dict["stage_one_logits"] = stage_one_logits
output_dict["stage_two_logits"] = stage_two_logits
return output_dict
|
TensorFlow/LanguageModeling/BERT/data | data | PubMedDownloader | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bz2
import glob
import gzip
import os
import urllib.request
import shutil
import sys
class PubMedDownloader:
def __init__(self, subset, save_path):
self.subset = subset
# Modifying self.save_path in two steps to handle creation of subdirectories
self.save_path = save_path + '/pubmed' + '/'
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.save_path = self.save_path + '/' + subset
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.download_urls = {
'baseline' : 'ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/',
'daily_update' : 'ftp://ftp.ncbi.nlm.nih.gov/pubmed/updatefiles/',
'fulltext' : 'ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/',
'open_access' : 'ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/'
}
def download(self):
print('subset:', self.subset)
url = self.download_urls[self.subset]
self.download_files(url)
self.extract_files()
def download_files(self, url):
url = self.download_urls[self.subset]
output = os.popen('curl ' + url).read()
if self.subset == 'fulltext' or self.subset == 'open_access':
line_split = 'comm_use' if self.subset == 'fulltext' else 'non_comm_use'
for line in output.splitlines():
if line[-10:] == 'xml.tar.gz' and \
line.split(' ')[-1].split('.')[0] == line_split:
file = os.path.join(self.save_path, line.split(' ')[-1])
if not os.path.isfile(file):
print('Downloading', file)
response = urllib.request.urlopen(url + line.split(' ')[-1])
with open(file, "wb") as handle:
handle.write(response.read())
elif self.subset == 'baseline' or self.subset == 'daily_update':
for line in output.splitlines():
if line[-3:] == '.gz':
file = os.path.join(self.save_path, line.split(' ')[-1])
if not os.path.isfile(file):
print('Downloading', file)
response = urllib.request.urlopen(url + line.split(' ')[-1])
with open(file, "wb") as handle:
handle.write(response.read())
else:
assert False, 'Invalid PubMed dataset/subset specified.'
def extract_files(self):
files = glob.glob(self.save_path + '/*.xml.gz')
for file in files:
print('file:', file)
input = gzip.GzipFile(file, mode='rb')
s = input.read()
input.close()
out = open(file[:-3], mode='wb')
out.write(s)
out.close()
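# Example usage (illustrative sketch; the full baseline download is large):
#   downloader = PubMedDownloader('baseline', './download')
#   downloader.download()  # fetches *.xml.gz files over FTP and unpacks them in place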
|
PyTorch/LanguageModeling/BERT/lamb_amp_opt/csrc | csrc | type_shim | #include <ATen/ATen.h>
#include "compat.h"
// Forward/backward compatibility hack around
// https://github.com/pytorch/pytorch/commit/3aeb78079bcd68282fe9117088e138b77318e288
// pending more future-proof guidance from upstream.
// struct TypeShim
// {
// const at::Type& payload;
// TypeShim(const at::Type& type) : payload(type) {}
// // Enable trivial conversion to a const at::Type& for pre-3aeb78
// operator const at::Type&(){ return payload; };
// // Enable dispatch switch statements to take *this directly for post-3aeb78
// //operator at::ScalarType(){ return payload.; };
// };
#define DISPATCH_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Half: \
{ \
using scalar_t_##LEVEL = at::Half; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
#define DISPATCH_FLOAT_HALF_AND_BYTE(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Half: \
{ \
using scalar_t_##LEVEL = at::Half; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Byte: \
{ \
using scalar_t_##LEVEL = uint8_t; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
#define DISPATCH_DOUBLE_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Double: \
{ \
using scalar_t_##LEVEL = double; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Half: \
{ \
using scalar_t_##LEVEL = at::Half; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
#define DISPATCH_DOUBLE_AND_FLOAT(TYPE, LEVEL, NAME, ...) \
switch(TYPE) \
{ \
case at::ScalarType::Double: \
{ \
using scalar_t_##LEVEL = double; \
__VA_ARGS__; \
break; \
} \
case at::ScalarType::Float: \
{ \
using scalar_t_##LEVEL = float; \
__VA_ARGS__; \
break; \
} \
default: \
AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
}
template<typename T>
__device__ __forceinline__ T reduce_block_into_lanes
(T *x,
T val,
int lanes=1,
bool share_result=false) // lanes is intended to be <= 32.
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#pragma unroll
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = x[tid] + x[tid+i];
__syncthreads();
}
T final;
if(tid < 32)
{
if(blockSize >= 64)
final = x[tid] + x[tid+32];
else
final = val;
// __SYNCWARP();
#pragma unroll
for(int i = 16; i >= lanes; i >>= 1)
final = final + __shfl_down_sync(0xffffffff, final, i);
}
if(share_result)
{
if(tid < lanes)
x[tid] = final; // EpilogueOp
// Make sure the smem result is visible to all warps.
__syncthreads();
}
return final;
}
template<typename T>
__device__ __forceinline__ T reduce_block_into_lanes_max_op
(T *x,
T val,
int lanes=1,
bool share_result=false) // lanes is intended to be <= 32.
{
int tid = threadIdx.x + threadIdx.y*blockDim.x;
int blockSize = blockDim.x*blockDim.y; // blockSize is intended to be a multiple of 32.
if(blockSize >= 64)
{
x[tid] = val;
__syncthreads();
}
#pragma unroll
for(int i = (blockSize >> 1); i >= 64; i >>= 1)
{
if(tid < i)
x[tid] = fmaxf(fabsf(x[tid]), fabsf(x[tid+i]));
__syncthreads();
}
T final;
if(tid < 32)
{
if(blockSize >= 64)
final = fmaxf(fabsf(x[tid]), fabsf(x[tid+32]));
else
final = val;
// __SYNCWARP();
#pragma unroll
for(int i = 16; i >= lanes; i >>= 1)
final = fmaxf(fabsf(final), fabsf(__shfl_down_sync(0xffffffff, final, i)));
}
if(share_result)
{
if(tid < lanes)
x[tid] = final; // EpilogueOp
// Make sure the smem result is visible to all warps.
__syncthreads();
}
return final;
}
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | graph_rewriter_builder | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for quantized training and evaluation."""
import tensorflow as tf
def build(graph_rewriter_config, is_training):
"""Returns a function that modifies default graph based on options.
Args:
graph_rewriter_config: graph_rewriter_pb2.GraphRewriter proto.
is_training: whether in training or eval mode.
"""
def graph_rewrite_fn():
"""Function to quantize weights and activation of the default graph."""
if (graph_rewriter_config.quantization.weight_bits != 8 or
graph_rewriter_config.quantization.activation_bits != 8):
raise ValueError('Only 8bit quantization is supported')
# Quantize the graph by inserting quantize ops for weights and activations
if is_training:
tf.contrib.quantize.create_training_graph(
input_graph=tf.get_default_graph(),
quant_delay=graph_rewriter_config.quantization.delay)
else:
tf.contrib.quantize.create_eval_graph(input_graph=tf.get_default_graph())
tf.contrib.layers.summarize_collection('quant_vars')
return graph_rewrite_fn
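# Illustrative usage (a sketch; in the detection pipeline the config comes from
# the parsed graph_rewriter proto):
#   graph_rewrite_fn = build(graph_rewriter_config, is_training=True)
#   graph_rewrite_fn()  # inserts 8-bit fake-quantization ops into the default graph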
|
PyTorch/Recommendation/DLRM/dlrm/scripts | scripts | utils | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import time
from collections import defaultdict, deque
import dllogger
import torch
import torch.distributed as dist
from dlrm.utils.distributed import is_dist_avail_and_initialized
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item() if len(self.deque) else 0
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count if self.count else 0
@property
def max(self):
return max(self.deque) if len(self.deque) else 0
@property
def value(self):
return self.deque[-1] if len(self.deque) else None
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def print(self, header=None):
if not header:
header = ''
print_str = header
for name, meter in self.meters.items():
print_str += f" {name}: {meter}"
print(print_str)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
def lr_step(optim, num_warmup_iter, current_step, base_lr, warmup_factor, decay_steps=0, decay_start_step=None):
if decay_start_step is None:
decay_start_step = num_warmup_iter
new_lr = base_lr
if decay_start_step < num_warmup_iter:
raise ValueError('Learning rate warmup must finish before decay starts')
if current_step <= num_warmup_iter:
warmup_step = base_lr / (num_warmup_iter * (2 ** warmup_factor))
new_lr = base_lr - (num_warmup_iter - current_step) * warmup_step
steps_since_decay_start = current_step - decay_start_step
if decay_steps != 0 and steps_since_decay_start > 0:
already_decayed_steps = min(steps_since_decay_start, decay_steps)
new_lr = base_lr * ((decay_steps - already_decayed_steps) / decay_steps) ** 2
min_lr = 0.0000001
new_lr = max(min_lr, new_lr)
for param_group in optim.param_groups:
param_group['lr'] = new_lr
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def init_logging(log_path):
json_backend = dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=log_path)
stdout_backend = dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)
stdout_backend._metadata['best_auc'].update({'format': '0:.5f'})
stdout_backend._metadata['best_epoch'].update({'format': '0:.2f'})
stdout_backend._metadata['average_train_throughput'].update({'format': ':.2e'})
stdout_backend._metadata['average_test_throughput'].update({'format': ':.2e'})
stdout_backend._metadata['training_loss'].update({'format': '0:.5f'})
stdout_backend._metadata['best_validation_loss'].update({'format': '0:.5f'})
dllogger.init(backends=[json_backend, stdout_backend])
dllogger.metadata("best_auc", {"unit": None})
dllogger.metadata("mean_inference_latency_batch_1", {"unit": "s"})
dllogger.metadata("mean_inference_latency_batch_64", {"unit": "s"})
dllogger.metadata("mean_inference_latency_batch_4096", {"unit": "s"})
dllogger.metadata("average_train_throughput", {"unit": "samples/s"})
dllogger.metadata("mean_inference_throughput_batch_1", {"unit": "samples/s"})
dllogger.metadata("mean_inference_throughput_batch_64", {"unit": "samples/s"})
dllogger.metadata("mean_inference_throughput_batch_4096", {"unit": "samples/s"})
class StepTimer():
def __init__(self):
self._previous = None
self._new = None
self.measured = None
def click(self, synchronize=False):
self._previous = self._new
if synchronize:
torch.cuda.synchronize()
self._new = time.time()
if self._previous is not None:
self.measured = self._new - self._previous
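# Illustrative usage of StepTimer (sketch):
#   timer = StepTimer()
#   timer.click()                   # first click only records a timestamp
#   ...                             # forward/backward/optimizer step
#   timer.click(synchronize=True)   # timer.measured now holds the elapsed seconds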
class LearningRateScheduler:
"""Polynomial learning rate decay for multiple optimizers and multiple param groups
Args:
optimizers (list): optimizers for which to apply the learning rate changes
base_lrs (list): a nested list of base_lrs to use for each param_group of each optimizer
warmup_steps (int): number of linear warmup steps to perform at the beginning of training
warmup_factor (int)
decay_steps (int): number of steps over which to apply poly LR decay from base_lr to 0
decay_start_step (int): the optimization step at which to start decaying the learning rate
if None, the decay starts immediately after the warmup finishes
decay_power (float): polynomial learning rate decay power
end_lr_factor (float): for each optimizer and param group:
lr = max(current_lr_factor, end_lr_factor) * base_lr
Example:
lr_scheduler = LearningRateScheduler(optimizers=[optimizer], base_lrs=[[lr]],
warmup_steps=100, warmup_factor=0,
decay_start_step=1000, decay_steps=2000,
decay_power=2, end_lr_factor=1e-6)
for batch in data_loader:
lr_scheduler.step()
# forward, backward, weight update
"""
def __init__(self, optimizers, base_lrs, warmup_steps, warmup_factor,
decay_steps, decay_start_step, decay_power=2, end_lr_factor=0):
self.current_step = 0
self.optimizers = optimizers
self.base_lrs = base_lrs
self.warmup_steps = warmup_steps
self.warmup_factor = warmup_factor
self.decay_steps = decay_steps
self.decay_start_step = decay_start_step
self.decay_power = decay_power
self.end_lr_factor = end_lr_factor
self.decay_end_step = self.decay_start_step + self.decay_steps
if self.decay_start_step < self.warmup_steps:
raise ValueError('Learning rate warmup must finish before decay starts')
def _compute_lr_factor(self):
lr_factor = 1
if self.current_step <= self.warmup_steps:
warmup_step = 1 / (self.warmup_steps * (2 ** self.warmup_factor))
lr_factor = 1 - (self.warmup_steps - self.current_step) * warmup_step
elif self.decay_start_step < self.current_step <= self.decay_end_step:
lr_factor = ((self.decay_end_step - self.current_step) / self.decay_steps) ** self.decay_power
lr_factor = max(lr_factor, self.end_lr_factor)
elif self.current_step > self.decay_end_step:
lr_factor = self.end_lr_factor
return lr_factor
def step(self):
self.current_step += 1
lr_factor = self._compute_lr_factor()
for optim, base_lrs in zip(self.optimizers, self.base_lrs):
for group_id, base_lr in enumerate(base_lrs):
optim.param_groups[group_id]['lr'] = base_lr * lr_factor
def roc_auc_score(y_true, y_score):
"""ROC AUC score in PyTorch
Args:
y_true (Tensor): ground-truth binary labels (0 or 1)
y_score (Tensor): predicted scores or probabilities
"""
device = y_true.device
y_true.squeeze_()
y_score.squeeze_()
if y_true.shape != y_score.shape:
raise TypeError(f"Shape of y_true and y_score must match. Got {y_true.shape()} and {y_score.shape()}.")
desc_score_indices = torch.argsort(y_score, descending=True)
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
distinct_value_indices = torch.nonzero(y_score[1:] - y_score[:-1], as_tuple=False).squeeze()
threshold_idxs = torch.cat([distinct_value_indices, torch.tensor([y_true.numel() - 1], device=device)])
tps = torch.cumsum(y_true, dim=0)[threshold_idxs]
fps = 1 + threshold_idxs - tps
tps = torch.cat([torch.zeros(1, device=device), tps])
fps = torch.cat([torch.zeros(1, device=device), fps])
fpr = fps / fps[-1]
tpr = tps / tps[-1]
area = torch.trapz(tpr, fpr).item()
return area
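# Quick sanity check for the function above (values are illustrative):
#   y_true = torch.tensor([0., 0., 1., 1.])
#   y_score = torch.tensor([0.1, 0.4, 0.35, 0.8])
#   roc_auc_score(y_true, y_score)  # expected to be ~0.75, matching sklearn on the same input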
|
PyTorch/Segmentation/nnUNet/triton | triton | requirements | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
networkx==2.5
onnx==1.8.0
onnxruntime==1.5.2
pycuda>=2019.1.2
PyYAML>=5.2
tqdm>=4.44.1
tabulate>=0.8.7
natsort>=7.0.0
# use tags instead of branch names - a cached Docker layer could otherwise skip fetching the most recent changes on a branch
model_navigator @ git+https://github.com/triton-inference-server/model_navigator.git@v0.1.0#egg=model_navigator
|
PyTorch/SpeechSynthesis/FastPitch/triton | triton | prepare_input_data | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
import numpy as np
import os
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dataloader', type=str, required=True,
help='Path to file containing get_dataloader function')
parser.add_argument('--input-data-dir', type=str, required=True,
help='Path to directory where input data for perf client will be saved')
parser.add_argument('--dataset-path', required=False, help='Path to the dataset')
parser.add_argument('--precision', type=str, default="fp16",
help='Precision for the generated input data')
parser.add_argument('--length', type=int, required=True,
help='Length of the generated input data')
args = parser.parse_args()
args.batch_size = 1
return args
def main():
args = parse_args()
spec = importlib.util.spec_from_file_location('dataloader', args.dataloader)
dm = importlib.util.module_from_spec(spec)
spec.loader.exec_module(dm)
dataloader = dm.get_dataloader_fn(dataset_path=args.dataset_path,
batch_size=1,
precision=args.precision)
_, x, _ = next(dataloader())
for name, t in x.items():
if name == 'INPUT__0':
if t.shape[1] > args.length:
t = t[:,:,:args.length]
elif t.shape[1] < args.length:
num_tiles = int(np.ceil(1.0*args.length/t.shape[1]))
t = np.tile(t, (1,1,num_tiles))
t = t[:,:,:args.length]
t.tofile(os.path.join(args.input_data_dir, name))
if __name__ == '__main__':
main()
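# Example invocation (illustrative; the dataloader script and dataset path are
# assumptions):
#   python triton/prepare_input_data.py --dataloader triton/dataloader.py \
#       --input-data-dir /tmp/perf_input --dataset-path ./LJSpeech-1.1 \
#       --precision fp16 --length 128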
|
TensorFlow2/Detection/Efficientdet/object_detection | object_detection | tf_example_decoder | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow.compat.v1 as tf
def _get_source_id_from_encoded_image(parsed_tensors):
return tf.strings.as_string(
tf.strings.to_hash_bucket_fast(parsed_tensors['image/encoded'],
2**63 - 1))
class TfExampleDecoder(object):
"""Tensorflow Example proto decoder."""
def __init__(self, include_mask=False, regenerate_source_id=False):
self._include_mask = include_mask
self._regenerate_source_id = regenerate_source_id
self._keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string),
'image/source_id': tf.FixedLenFeature((), tf.string, ''),
'image/height': tf.FixedLenFeature((), tf.int64, -1),
'image/width': tf.FixedLenFeature((), tf.int64, -1),
'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
'image/object/class/label': tf.VarLenFeature(tf.int64),
'image/object/area': tf.VarLenFeature(tf.float32),
'image/object/is_crowd': tf.VarLenFeature(tf.int64),
}
if include_mask:
self._keys_to_features.update({
'image/object/mask':
tf.VarLenFeature(tf.string),
})
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
"""Decode a set of PNG masks to the tf.float32 tensors."""
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
tf.greater(tf.shape(masks)[0], 0),
lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
lambda: tf.zeros([0, height, width], dtype=tf.float32))
def _decode_areas(self, parsed_tensors):
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
lambda: parsed_tensors['image/object/area'],
lambda: (xmax - xmin) * (ymax - ymin))
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- image: a uint8 tensor of shape [None, None, 3].
- source_id: a string scalar tensor.
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
- groundtruth_instance_masks: a float32 tensor of shape
[None, None, None].
- groundtruth_instance_masks_png: a string tensor of shape [None].
"""
parsed_tensors = tf.io.parse_single_example(
serialized_example, self._keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value=0)
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
areas = self._decode_areas(parsed_tensors)
decode_image_shape = tf.logical_or(
tf.equal(parsed_tensors['image/height'], -1),
tf.equal(parsed_tensors['image/width'], -1))
image_shape = tf.cast(tf.shape(image), dtype=tf.int64)
parsed_tensors['image/height'] = tf.where(decode_image_shape,
image_shape[0],
parsed_tensors['image/height'])
parsed_tensors['image/width'] = tf.where(decode_image_shape, image_shape[1],
parsed_tensors['image/width'])
is_crowds = tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool)) # pylint: disable=line-too-long
if self._regenerate_source_id:
source_id = _get_source_id_from_encoded_image(parsed_tensors)
else:
source_id = tf.cond(
tf.greater(tf.strings.length(parsed_tensors['image/source_id']),
0), lambda: parsed_tensors['image/source_id'],
lambda: _get_source_id_from_encoded_image(parsed_tensors))
if self._include_mask:
masks = self._decode_masks(parsed_tensors)
decoded_tensors = {
'image': image,
'source_id': source_id,
'height': parsed_tensors['image/height'],
'width': parsed_tensors['image/width'],
'groundtruth_classes': parsed_tensors['image/object/class/label'],
'groundtruth_is_crowd': is_crowds,
'groundtruth_area': areas,
'groundtruth_boxes': boxes,
}
if self._include_mask:
decoded_tensors.update({
'groundtruth_instance_masks': masks,
'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],
})
return decoded_tensors
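# Illustrative usage (a sketch; the TFRecord path is an assumption):
#   decoder = TfExampleDecoder(include_mask=False)
#   dataset = tf.data.TFRecordDataset('/data/coco/train-00000-of-00256.tfrecord')
#   dataset = dataset.map(decoder.decode)
#   # each element is a dict with 'image', 'groundtruth_boxes',
#   # 'groundtruth_classes', 'groundtruth_is_crowd', ...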
|
PyTorch/SpeechSynthesis/Tacotron2/tensorrt | tensorrt | convert_tacotron22onnx | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from torch import nn
from torch.nn import functional as F
import argparse
import sys
sys.path.append('./')
import models
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model, prepare_input_sequence
from tacotron2_common.utils import to_gpu, get_mask_from_lengths
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str,
help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory for the exported Tacotron 2 ONNX model')
parser.add_argument('--fp16', action='store_true',
help='Export with half precision to ONNX')
return parser
def encoder_infer(self, x, input_lengths):
device = x.device
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x.to(device))), 0.5, False)
x = x.transpose(1, 2)
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
lens = input_lengths*2
return outputs, lens
class Encoder(torch.nn.Module):
def __init__(self, tacotron2):
super(Encoder, self).__init__()
self.tacotron2 = tacotron2
self.tacotron2.encoder.lstm.flatten_parameters()
self.infer = encoder_infer
def forward(self, sequence, sequence_lengths):
embedded_inputs = self.tacotron2.embedding(sequence).transpose(1, 2)
memory, lens = self.infer(self.tacotron2.encoder, embedded_inputs, sequence_lengths)
processed_memory = self.tacotron2.decoder.attention_layer.memory_layer(memory)
return memory, processed_memory, lens
class Postnet(torch.nn.Module):
def __init__(self, tacotron2):
super(Postnet, self).__init__()
self.tacotron2 = tacotron2
def forward(self, mel_outputs):
mel_outputs_postnet = self.tacotron2.postnet(mel_outputs)
return mel_outputs + mel_outputs_postnet
def lstmcell2lstm_params(lstm_mod, lstmcell_mod):
lstm_mod.weight_ih_l0 = torch.nn.Parameter(lstmcell_mod.weight_ih)
lstm_mod.weight_hh_l0 = torch.nn.Parameter(lstmcell_mod.weight_hh)
lstm_mod.bias_ih_l0 = torch.nn.Parameter(lstmcell_mod.bias_ih)
lstm_mod.bias_hh_l0 = torch.nn.Parameter(lstmcell_mod.bias_hh)
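# Note: a single-layer nn.LSTM stores its parameters as *_l0 tensors with the
# same shapes as nn.LSTMCell, so copying them as above makes the two modules
# numerically equivalent for a single step; presumably this is done so the
# decoder exports as a standard ONNX LSTM node instead of a per-step cell.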
def prenet_infer(self, x):
x1 = x[:]
for linear in self.layers:
x1 = F.relu(linear(x1))
x0 = x1[0].unsqueeze(0)
mask = torch.le(torch.rand(256, device='cuda').to(x.dtype), 0.5).to(x.dtype)
mask = mask.expand(x1.size(0), x1.size(1))
x1 = x1*mask*2.0
return x1
class DecoderIter(torch.nn.Module):
def __init__(self, tacotron2):
super(DecoderIter, self).__init__()
self.tacotron2 = tacotron2
dec = tacotron2.decoder
self.p_attention_dropout = dec.p_attention_dropout
self.p_decoder_dropout = dec.p_decoder_dropout
self.prenet = dec.prenet
self.prenet.infer = prenet_infer
self.attention_rnn = nn.LSTM(dec.prenet_dim + dec.encoder_embedding_dim,
dec.attention_rnn_dim, 1)
lstmcell2lstm_params(self.attention_rnn, dec.attention_rnn)
self.attention_rnn.flatten_parameters()
self.attention_layer = dec.attention_layer
self.decoder_rnn = nn.LSTM(dec.attention_rnn_dim + dec.encoder_embedding_dim,
dec.decoder_rnn_dim, 1)
lstmcell2lstm_params(self.decoder_rnn, dec.decoder_rnn)
self.decoder_rnn.flatten_parameters()
self.linear_projection = dec.linear_projection
self.gate_layer = dec.gate_layer
def decode(self, decoder_input, in_attention_hidden, in_attention_cell,
in_decoder_hidden, in_decoder_cell, in_attention_weights,
in_attention_weights_cum, in_attention_context, memory,
processed_memory, mask):
cell_input = torch.cat((decoder_input, in_attention_context), -1)
_, (out_attention_hidden, out_attention_cell) = self.attention_rnn(
cell_input.unsqueeze(0), (in_attention_hidden.unsqueeze(0),
in_attention_cell.unsqueeze(0)))
out_attention_hidden = out_attention_hidden.squeeze(0)
out_attention_cell = out_attention_cell.squeeze(0)
out_attention_hidden = F.dropout(
out_attention_hidden, self.p_attention_dropout, False)
attention_weights_cat = torch.cat(
(in_attention_weights.unsqueeze(1),
in_attention_weights_cum.unsqueeze(1)), dim=1)
out_attention_context, out_attention_weights = self.attention_layer(
out_attention_hidden, memory, processed_memory,
attention_weights_cat, mask)
out_attention_weights_cum = in_attention_weights_cum + out_attention_weights
decoder_input_tmp = torch.cat(
(out_attention_hidden, out_attention_context), -1)
_, (out_decoder_hidden, out_decoder_cell) = self.decoder_rnn(
decoder_input_tmp.unsqueeze(0), (in_decoder_hidden.unsqueeze(0),
in_decoder_cell.unsqueeze(0)))
out_decoder_hidden = out_decoder_hidden.squeeze(0)
out_decoder_cell = out_decoder_cell.squeeze(0)
out_decoder_hidden = F.dropout(
out_decoder_hidden, self.p_decoder_dropout, False)
decoder_hidden_attention_context = torch.cat(
(out_decoder_hidden, out_attention_context), 1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return (decoder_output, gate_prediction, out_attention_hidden,
out_attention_cell, out_decoder_hidden, out_decoder_cell,
out_attention_weights, out_attention_weights_cum, out_attention_context)
# @torch.jit.script
def forward(self,
decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask):
decoder_input1 = self.prenet.infer(self.prenet, decoder_input)
outputs = self.decode(decoder_input1,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
return outputs
def test_inference(encoder, decoder_iter, postnet):
encoder.eval()
decoder_iter.eval()
postnet.eval()
sys.path.append('./tensorrt')
from inference_trt import init_decoder_inputs
texts = ["Hello World, good day."]
sequences, sequence_lengths = prepare_input_sequence(texts)
measurements = {}
print("Running Tacotron2 Encoder")
with torch.no_grad():
memory, processed_memory, lens = encoder(sequences, sequence_lengths)
print("Running Tacotron2 Decoder")
device = memory.device
dtype = memory.dtype
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device = device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device = device)
mel_outputs, gate_outputs, alignments = (torch.zeros(1), torch.zeros(1), torch.zeros(1))
gate_threshold = 0.6
max_decoder_steps = 1000
first_iter = True
(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory,
mask) = init_decoder_inputs(memory, processed_memory, sequence_lengths)
while True:
with torch.no_grad():
(mel_output, gate_output,
attention_hidden, attention_cell,
decoder_hidden, decoder_cell,
attention_weights, attention_weights_cum,
attention_context) = decoder_iter(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory, mask)
if first_iter:
mel_outputs = torch.unsqueeze(mel_output, 2)
gate_outputs = torch.unsqueeze(gate_output, 2)
alignments = torch.unsqueeze(attention_weights, 2)
first_iter = False
else:
mel_outputs = torch.cat((mel_outputs, torch.unsqueeze(mel_output, 2)), 2)
gate_outputs = torch.cat((gate_outputs, torch.unsqueeze(gate_output, 2)), 2)
alignments = torch.cat((alignments, torch.unsqueeze(attention_weights, 2)), 2)
dec = torch.le(torch.sigmoid(gate_output), gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if torch.sum(not_finished) == 0:
print("Stopping after ",mel_outputs.size(2)," decoder steps")
break
if mel_outputs.size(2) == max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
print("Running Tacotron2 PostNet")
with torch.no_grad():
mel_outputs_postnet = postnet(mel_outputs)
return mel_outputs_postnet
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 export to TRT')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
fp16_run=args.fp16, cpu_run=False)
opset_version = 10
sequences = torch.randint(low=0, high=148, size=(1,50),
dtype=torch.long).cuda()
sequence_lengths = torch.IntTensor([sequences.size(1)]).cuda().long()
dummy_input = (sequences, sequence_lengths)
encoder = Encoder(tacotron2)
encoder.eval()
with torch.no_grad():
encoder(*dummy_input)
torch.onnx.export(encoder, dummy_input, args.output+"/"+"encoder.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["sequences", "sequence_lengths"],
output_names=["memory", "processed_memory", "lens"],
dynamic_axes={"sequences": {1: "text_seq"},
"memory": {1: "mem_seq"},
"processed_memory": {1: "mem_seq"}
})
decoder_iter = DecoderIter(tacotron2)
memory = torch.randn((1,sequence_lengths[0],512)).cuda() #encoder_outputs
if args.fp16:
memory = memory.half()
memory_lengths = sequence_lengths
# initialize decoder states for dummy_input
decoder_input = tacotron2.decoder.get_go_frame(memory)
mask = get_mask_from_lengths(memory_lengths)
(attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory) = tacotron2.decoder.initialize_decoder_states(memory)
dummy_input = (decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
decoder_iter = DecoderIter(tacotron2)
decoder_iter.eval()
with torch.no_grad():
decoder_iter(*dummy_input)
torch.onnx.export(decoder_iter, dummy_input, args.output+"/"+"decoder_iter.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["decoder_input",
"attention_hidden",
"attention_cell",
"decoder_hidden",
"decoder_cell",
"attention_weights",
"attention_weights_cum",
"attention_context",
"memory",
"processed_memory",
"mask"],
output_names=["decoder_output",
"gate_prediction",
"out_attention_hidden",
"out_attention_cell",
"out_decoder_hidden",
"out_decoder_cell",
"out_attention_weights",
"out_attention_weights_cum",
"out_attention_context"],
dynamic_axes={"attention_weights" : {1: "seq_len"},
"attention_weights_cum" : {1: "seq_len"},
"memory" : {1: "seq_len"},
"processed_memory" : {1: "seq_len"},
"mask" : {1: "seq_len"},
"out_attention_weights" : {1: "seq_len"},
"out_attention_weights_cum" : {1: "seq_len"}
})
postnet = Postnet(tacotron2)
dummy_input = torch.randn((1,80,620)).cuda()
if args.fp16:
dummy_input = dummy_input.half()
torch.onnx.export(postnet, dummy_input, args.output+"/"+"postnet.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["mel_outputs"],
output_names=["mel_outputs_postnet"],
dynamic_axes={"mel_outputs": {2: "mel_seq"},
"mel_outputs_postnet": {2: "mel_seq"}})
mel = test_inference(encoder, decoder_iter, postnet)
torch.save(mel, "mel.pt")
if __name__ == '__main__':
main()
|
TensorFlow/Segmentation/UNet_Industrial/scripts | scripts | UNet_FP32_4GPU_XLA | #!/usr/bin/env bash
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches UNet training in FP32 on 4 GPUs with a global batch size of 16 (4 per GPU)
# Usage ./UNet_FP32_4GPU_XLA.sh <path to result repository> <path to dataset> <dagm classID (1-10)>
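# Example invocation (illustrative paths only):
#   ./UNet_FP32_4GPU_XLA.sh /results /data/DAGM2007 1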
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export TF_CPP_MIN_LOG_LEVEL=3
mpirun \
-np 4 \
-H localhost:4 \
-bind-to none \
-map-by slot \
-x NCCL_DEBUG=VERSION \
-x LD_LIBRARY_PATH \
-x PATH \
-mca pml ob1 -mca btl ^openib \
--allow-run-as-root \
python "${BASEDIR}/../main.py" \
--unet_variant='tinyUNet' \
--activation_fn='relu' \
--exec_mode='train_and_evaluate' \
--iter_unit='batch' \
--num_iter=2500 \
--batch_size=4 \
--warmup_step=10 \
--results_dir="${1}" \
--data_dir="${2}" \
--dataset_name='DAGM2007' \
--dataset_classID="${3}" \
--data_format='NCHW' \
--use_auto_loss_scaling \
--nouse_tf_amp \
--use_xla \
--learning_rate=1e-4 \
--learning_rate_decay_factor=0.8 \
--learning_rate_decay_steps=500 \
--rmsprop_decay=0.9 \
--rmsprop_momentum=0.8 \
--loss_fn_name='adaptive_loss' \
--weight_decay=1e-5 \
--weight_init_method='he_uniform' \
--augment_data \
--display_every=250 \
--debug_verbosity=0
|
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/perf_analyzer | perf_analyzer | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .perf_analyzer import PerfAnalyzer # noqa: F401
from .perf_config import PerfAnalyzerConfig # noqa: F401
|
PyTorch/Classification/GPUNet/triton/deployment_toolkit/library | library | onnx | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from ..core import (
BaseLoader,
BaseRunner,
BaseRunnerSession,
BaseSaver,
Format,
Model,
Precision,
TensorSpec,
TimeMeasurement,
)
from ..extensions import loaders, runners, savers
from .utils import infer_precision
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple(_get_dim(d) for d in shape.dim)
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
# TODO: probably modification of onnx model ios causes error on optimize
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
with TimeMeasurement(self):
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
# def __call__(self, x: Dict[str, object]):
# io_binding = self._session.io_binding()
#
# for input_name in self._input_names:
# input = x[input_name]
# ortinput = onnxruntime.OrtValue.ortvalue_from_numpy(input, "cuda", 0)
# io_binding.bind_input(input_name, "cuda", 0, input.dtype, input.shape, ortinput.data_ptr())
#
# for output_name in self._output_names:
# io_binding.bind_output(output_name)
#
# with TimeMeasurement(self):
# self._session.run_with_iobinding(io_binding)
# y_pred = io_binding.copy_outputs_to_cpu()
#
# y_pred = dict(zip(self._output_names, y_pred))
#
# return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
|
PyTorch/Classification/ConvNets/image_classification | image_classification | __init__ | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#from . import logger
#from . import dataloaders
#from . import training
#from . import utils
#from . import mixup
#from . import smoothing
from . import models
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets | datasets | tabformer | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import json
import logging
import shutil
from typing import Optional
import cudf
import cupy as cp
import numpy as np
import pandas as pd
from syngen.utils.types import DataFrameType
from syngen.configuration import SynGenDatasetFeatureSpec
from syngen.preprocessing.base_preprocessing import BasePreprocessing
from syngen.utils.types import MetaData
class TabFormerPreprocessing(BasePreprocessing):
"""
preprocessing for https://github.com/IBM/TabFormer
"""
def __init__(
self,
source_path: str,
destination_path: Optional[str] = None,
download: bool = False,
**kwargs,
):
super().__init__(source_path, destination_path, download, **kwargs)
@staticmethod
def nanNone(X: DataFrameType) -> DataFrameType:
return X.where(X.notnull(), "None")
@staticmethod
def amountEncoder(X: DataFrameType) -> DataFrameType:
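        # Strips the leading '$', clips the amount to a minimum of 1.0, and maps it to
        # log-space, e.g. (illustrative) '$20.00' -> log(20.0).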
return (
X.str.slice(start=1)
.astype(float)
.clip(lower=1.0)
.map(lambda x: math.log(x))
)
def transform(self, gpu=False, use_cache=False) -> SynGenDatasetFeatureSpec:
if use_cache and os.path.exists(self.destination_path):
return SynGenDatasetFeatureSpec.instantiate_from_preprocessed(self.destination_path)
operator = cp if gpu else np
tabular_operator = cudf if gpu else pd
data = tabular_operator.read_csv(os.path.join(self.source_path, 'card_transaction.v2.csv'))
data.columns = [
i.lower().replace(" ", "_") for i in data.columns.tolist()
]
data = data.rename(
columns={"is_fraud?": "is_fraud", "errors?": "errors", "merchant_name": "merchant_id"}
)
data['card_id'] = data['user'] + data['card']
data.drop(columns=['user', 'card'], inplace=True)
data["errors"] = data["errors"].fillna(0)
data["use_chip"] = self.nanNone(data["use_chip"])
data["amount"] = self.amountEncoder(data["amount"])
cont_columns = ["amount"]
cat_columns = ["use_chip", "errors", "is_fraud"]
for col in ("card_id", "merchant_id", *cat_columns):
data[col] = data[col].astype("category").cat.codes
data[col] = data[col].astype(int)
structural_data = data[['card_id', 'merchant_id']]
tabular_data = data[[*cat_columns, *cont_columns]]
edge_features = self._prepare_feature_list(tabular_data, cat_columns, cont_columns)
graph_metadata = {
MetaData.NODES: [
{
MetaData.NAME: "card",
MetaData.COUNT: int(structural_data['card_id'].max()),
MetaData.FEATURES: [],
MetaData.FEATURES_PATH: None,
},
{
MetaData.NAME: "merchant",
MetaData.COUNT: int(structural_data['merchant_id'].max()),
MetaData.FEATURES: [],
MetaData.FEATURES_PATH: None,
}
],
MetaData.EDGES: [
{
MetaData.NAME: "transaction",
MetaData.COUNT: len(structural_data),
MetaData.SRC_NODE_TYPE: "card",
MetaData.DST_NODE_TYPE: "merchant",
MetaData.DIRECTED: False,
MetaData.FEATURES: edge_features,
MetaData.FEATURES_PATH: "transaction.parquet",
MetaData.STRUCTURE_PATH: "transaction_edge_list.parquet",
}
]
}
shutil.rmtree(self.destination_path, ignore_errors=True)
os.makedirs(self.destination_path)
tabular_data.to_parquet(os.path.join(self.destination_path, "transaction.parquet"))
structural_data.to_parquet(os.path.join(self.destination_path, "transaction_edge_list.parquet"))
with open(os.path.join(self.destination_path, 'graph_metadata.json'), 'w') as f:
json.dump(graph_metadata, f, indent=4)
graph_metadata[MetaData.PATH] = self.destination_path
return SynGenDatasetFeatureSpec(graph_metadata)
def download(self):
raise NotImplementedError(
"TabFormer dataset does not support automatic downloading. Please run /workspace/scripts/get_datasets.sh"
)
def _check_files(self) -> bool:
files = ['card_transaction.v2.csv']
return all(os.path.exists(os.path.join(self.source_path, file)) for file in files)
|
TensorFlow/Translation/GNMT/examples | examples | DGX1_AMP_8GPU | python nmt.py --output_dir=results --batch_size=1024 --learning_rate=2e-3 --num_gpus=8 --amp
|
PyTorch/SpeechRecognition/wav2vec2/utils | utils | combine_w2v2_filelist_with_phone_alignments | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pathlib import Path
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--manifest', type=Path, nargs='+',
        help='w2v2 manifest files: a header line, then <filepath> <duration> on every line')
parser.add_argument(
'--alignments', type=Path,
help='CPC_audio alignments with <ID> <PHONE_ID_LIST> on every line')
parser.add_argument(
'--ids', type=Path,
help='List of IDs for this split (train/test, one per line)')
parser.add_argument(
'--out', type=Path,
help='Output manifest fpath')
args = parser.parse_args()
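    # Expected formats, inferred from the parsing below (illustrative):
    #   manifest:   header line, then "<filepath> <duration>" per line (ID = file stem)
    #   alignments: "<ID> <PHONE_ID_LIST>" per line
    #   ids:        one ID per line
    # Outputs: <out>.tsv (header + "<filepath>\t<duration>") and <out>.ph (one alignment per line).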
header = None
fpaths = {}
durs = {}
alis = {}
ids = []
out = []
for fpath in args.manifest:
print(f'Loading {fpath}')
with open(fpath) as f:
for i, line in enumerate(f):
if i == 0:
header = line.strip()
continue
fp, dur = line.split()
id = Path(fp).stem
fpaths[id] = fp
durs[id] = dur # int(dur)
with open(args.alignments) as f:
for line in f:
id, ph = line.strip().split(' ', 1)
alis[id] = ph
ids = [line.strip() for line in open(args.ids)]
for id in ids:
fp = fpaths[id]
d = durs[id]
a = alis[id]
out.append([fp, d, a])
with open(args.out.with_suffix('.tsv'), 'w') as f:
f.write(header + '\n')
for o in out:
f.write('\t'.join(o[:2]) + '\n')
with open(args.out.with_suffix('.ph'), 'w') as f:
for o in out:
f.write(o[2] + '\n')
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | mask_rcnn_inception_v2_coco | # Mask R-CNN with Inception V2
# Configured for MSCOCO Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
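# For example (illustrative paths only):
#   fine_tune_checkpoint: "/data/checkpoints/model.ckpt"
#   label_map_path: "/data/mscoco_label_map.pbtxt"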
model {
faster_rcnn {
num_classes: 90
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 800
max_dimension: 1365
}
}
number_of_stages: 3
feature_extractor {
type: 'faster_rcnn_inception_v2'
first_stage_features_stride: 16
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
predict_instance_masks: true
mask_height: 15
mask_width: 15
mask_prediction_conv_depth: 0
mask_prediction_num_conv_layers: 2
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
second_stage_mask_prediction_loss_weight: 4.0
}
}
train_config: {
batch_size: 1
optimizer {
momentum_optimizer: {
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 0.0002
schedule {
step: 900000
learning_rate: .00002
}
schedule {
step: 1200000
learning_rate: .000002
}
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
from_detection_checkpoint: true
# Note: The below line limits the training process to 200K steps, which we
# empirically found to be sufficient to train the pets dataset. This
# effectively bypasses the learning rate schedule (the learning rate will
# never decay). Remove the below line to train indefinitely.
num_steps: 200000
data_augmentation_options {
random_horizontal_flip {
}
}
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
load_instance_masks: true
mask_type: PNG_MASKS
}
eval_config: {
num_examples: 8000
# Note: The below line limits the evaluation process to 10 evaluations.
# Remove the below line to evaluate indefinitely.
max_evals: 10
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
load_instance_masks: true
mask_type: PNG_MASKS
shuffle: false
num_readers: 1
}
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/criterion/overrides | overrides | quantile_overrides | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
trainer:
criterion:
quantiles: [0.1, 0.5, 0.9]
model:
config:
quantiles: [0.1, 0.5, 0.9]
output_selector: 1
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular | tabular | uniform_generator | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import pickle
from typing import Optional, List, Union
from tqdm import tqdm
import cupy as cp
import numpy as np
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder
from pandas.api.types import is_integer_dtype
from syngen.generator.tabular.chunked_tabular_generator import ChunkedBaseTabularGenerator
class UniformGenerator(ChunkedBaseTabularGenerator):
"""Uniform random feature generator.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def ordinal_encoder(self, cat_col):
encoder = OrdinalEncoder()
encoder.fit(cat_col)
return encoder
def fit(
self,
data,
categorical_columns=(),
samples: Union[float, int] = 0.1,
columns: Optional[List[str]] = None,
verbose: bool = False,
):
"""Computes the min and max ranges of the columns.
Args:
data: input data to use for extracting column statistics
            categorical_columns (list): list of columns that should be treated as categorical.
            samples (float or int): fraction of rows (if in (0, 1]) or absolute number of rows
                to subsample for fitting, capped at 10,000,000; values <= 0 use the full data
                (default: 0.1)
            columns (list, optional): column order to generate; defaults to the columns of `data`
            verbose (bool): print intermediate results (default: False)
"""
if samples > 0:
num_samples = len(data)
if 0.0 <= samples <= 1.0:
num_samples = samples * num_samples
else:
num_samples = samples
num_samples = min(int(num_samples), 10_000_000)
data = data.sample(n=num_samples)
self.column_order = columns or list(data.columns)
self.cat_fit = {}
self.categorical_columns = set(categorical_columns)
self.continuous_columns = set(self.column_order) - self.categorical_columns
cat_cols = tqdm(self.categorical_columns) if verbose else self.categorical_columns
for column in cat_cols:
enc = self.ordinal_encoder(data[column].values.reshape(-1, 1))
n_unique = len(enc.categories_[0])
self.cat_fit[column] = {
"encoder": enc,
"n_unique": n_unique,
"sampler": partial(np.random.randint, 0, n_unique),
'dtype': data[column].dtype,
}
self.cont_fit = {}
self.integer_continuous_columns = []
cont_cols = tqdm(self.continuous_columns) if verbose else self.continuous_columns
for column in cont_cols:
min_, max_ = data[column].min(), data[column].max()
self.cont_fit[column] = {
"min": min_,
"max": max_,
"sampler": partial(np.random.uniform, min_, max_),
'dtype': data[column].dtype,
}
if is_integer_dtype(data[column].dtype):
self.integer_continuous_columns.append(column)
self.fits = {**self.cat_fit, **self.cont_fit}
def sample(self, n, gpu=False, memmap_kwargs=None, start_idx=0, end_idx=None, **kwargs):
use_memmap = memmap_kwargs is not None
if use_memmap:
memmap_outfile = np.load(memmap_kwargs['filename'], mmap_mode='r+')
if gpu:
cont_min = []
cont_max = []
for column in self.continuous_columns:
cont_min.append(self.fits[column]['min'])
cont_max.append(self.fits[column]['max'])
cont_data = cp.random.uniform(
cp.array(cont_min),
cp.array(cont_max),
size=(n, len(self.continuous_columns)),
dtype=cp.float32
)
cont_data = cp.asnumpy(cont_data)
df = pd.DataFrame(cont_data, columns=list(self.continuous_columns))
if self.integer_continuous_columns:
df[self.integer_continuous_columns] = \
df[self.integer_continuous_columns].astype(np.int32)
for column in self.categorical_columns:
sampled_data = cp.random.randint(0, self.fits[column]["n_unique"], size=n, dtype=cp.int32)
sampled_data = cp.asnumpy(sampled_data.reshape(-1, 1))
encoder = self.fits[column]["encoder"]
sampled_data = encoder.inverse_transform(sampled_data)
df[column] = sampled_data.reshape(-1).astype(self.fits[column]["dtype"])
else:
df = pd.DataFrame()
for column in self.column_order:
sampler = self.fits[column]["sampler"]
sampled_data = sampler(n)
sampled_data = sampled_data.reshape(-1, 1)
if "encoder" in self.fits[column]:
encoder = self.fits[column]["encoder"]
sampled_data = encoder.inverse_transform(sampled_data)
df[column] = sampled_data.reshape(-1).astype(self.fits[column]["dtype"])
df = df[self.column_order]
if use_memmap:
memmap_outfile[start_idx:end_idx] = df.values
return None
return df
def _space_complexity_factor(self):
return 2.5
def save(self, path):
with open(path, 'wb') as file_handler:
pickle.dump(self, file_handler, protocol=pickle.HIGHEST_PROTOCOL)
@classmethod
def load(cls, path):
with open(path, 'rb') as file_handler:
model = pickle.load(file_handler)
return model
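# Illustrative usage (assumed data; not part of the original module):
#   generator = UniformGenerator()
#   generator.fit(df, categorical_columns=["category"])
#   synthetic_df = generator.sample(1000)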
|
TensorFlow/Classification/ConvNets/triton | triton | convert_model | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
`convert_model.py` converts a model between formats and can apply additional
optimizations for faster inference.
The input is either a serialized model binary or the result of a `get_model` function.
Currently supported input and output formats are:
- inputs
- `tf-estimator` - `get_model` function returning Tensorflow Estimator
- `tf-keras` - `get_model` function returning Tensorflow Keras Model
- `tf-savedmodel` - Tensorflow SavedModel binary
- `pyt` - `get_model` function returning PyTorch Module
- output
- `tf-savedmodel` - Tensorflow saved model
- `tf-trt` - TF-TRT saved model
- `ts-trace` - PyTorch traced ScriptModule
- `ts-script` - PyTorch scripted ScriptModule
- `onnx` - ONNX
- `trt` - TensorRT plan file
For tf-keras input you can use:
- --large-model flag - helps load models which exceed the 2GB protobuf size limit
- --tf-allow-growth flag - controls the GPU memory growth limiting feature
  (https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). It is disabled by default.
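Example invocation (illustrative only; the selected loader, converter, saver and
dataloader may add further arguments):

    python triton/convert_model.py \
        --input-path model.py \
        --input-type tf-estimator \
        --output-path model.savedmodel \
        --output-type tf-savedmodel \
        --dataloader dataloader.py \
        --ignore-unknown-parameters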
"""
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import (
DATALOADER_FN_NAME,
BaseConverter,
BaseLoader,
BaseSaver,
Format,
Precision,
load_from_file,
)
from .deployment_toolkit.extensions import converters, loaders, savers
LOGGER = logging.getLogger("convert_model")
INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.TF_SAVEDMODEL, Format.PYT]
OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TF_TRT, Format.ONNX, Format.TRT, Format.TS_TRACE, Format.TS_SCRIPT]
def _get_args():
parser = argparse.ArgumentParser(description="Script for conversion between model formats.", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model file (python module or binary file)", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter is not None:
ArgParserGenerator(Converter).update_argparser(parser)
Saver: BaseSaver = savers.get(args.output_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
requested_model_precision = Precision(args.precision)
dataloader_fn = None
    # if conversion is required, temporarily change the model load precision to the one
    # required by the converter; TensorRT converters require fp32 models for all requested precisions
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter:
args.precision = Converter.required_source_model_precision(requested_model_precision).value
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
model = loader.load(args.input_path)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if Converter: # if conversion is needed
        # dataloader must match the source model precision - so not recovering it yet
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
# recover precision to that requested by user
args.precision = requested_model_precision.value
if Converter:
converter = ArgParserGenerator(Converter).from_args(args)
model = converter.convert(model, dataloader_fn=dataloader_fn)
Saver: BaseSaver = savers.get(args.output_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path)
return 0
if __name__ == "__main__":
main()
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | hyperparams_builder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import tensorflow as tf
from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
from object_detection.utils import context_manager
slim = tf.contrib.slim
class KerasLayerHyperparams(object):
"""
A hyperparameter configuration object for Keras layers used in
Object Detection models.
"""
def __init__(self, hyperparams_config):
"""Builds keras hyperparameter config for layers based on the proto config.
It automatically converts from Slim layer hyperparameter configs to
Keras layer hyperparameters. Namely, it:
- Builds Keras initializers/regularizers instead of Slim ones
- sets weights_regularizer/initializer to kernel_regularizer/initializer
- converts batchnorm decay to momentum
- converts Slim l2 regularizer weights to the equivalent Keras l2 weights
Contains a hyperparameter configuration for ops that specifies kernel
initializer, kernel regularizer, activation. Also contains parameters for
batch norm operators based on the configuration.
Note that if the batch_norm parameters are not specified in the config
(i.e. left to default) then batch norm is excluded from the config.
Args:
hyperparams_config: hyperparams.proto object containing
hyperparameters.
Raises:
ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
raise ValueError('hyperparams_config not of type '
'hyperparams_pb.Hyperparams.')
self._batch_norm_params = None
if hyperparams_config.HasField('batch_norm'):
self._batch_norm_params = _build_keras_batch_norm_params(
hyperparams_config.batch_norm)
self._activation_fn = _build_activation_fn(hyperparams_config.activation)
# TODO(kaftan): Unclear if these kwargs apply to separable & depthwise conv
# (Those might use depthwise_* instead of kernel_*)
# We should probably switch to using build_conv2d_layer and
# build_depthwise_conv2d_layer methods instead.
self._op_params = {
'kernel_regularizer': _build_keras_regularizer(
hyperparams_config.regularizer),
'kernel_initializer': _build_initializer(
hyperparams_config.initializer, build_for_keras=True),
'activation': _build_activation_fn(hyperparams_config.activation)
}
def use_batch_norm(self):
return self._batch_norm_params is not None
def batch_norm_params(self, **overrides):
"""Returns a dict containing batchnorm layer construction hyperparameters.
Optionally overrides values in the batchnorm hyperparam dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
**overrides: keyword arguments to override in the hyperparams dictionary
Returns: dict containing the layer construction keyword arguments, with
values overridden by the `overrides` keyword arguments.
"""
if self._batch_norm_params is None:
new_batch_norm_params = dict()
else:
new_batch_norm_params = self._batch_norm_params.copy()
new_batch_norm_params.update(overrides)
return new_batch_norm_params
def build_batch_norm(self, training=None, **overrides):
"""Returns a Batch Normalization layer with the appropriate hyperparams.
If the hyperparams are configured to not use batch normalization,
this will return a Keras Lambda layer that only applies tf.Identity,
without doing any normalization.
Optionally overrides values in the batch_norm hyperparam dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
training: if True, the normalization layer will normalize using the batch
statistics. If False, the normalization layer will be frozen and will
act as if it is being used for inference. If None, the layer
will look up the Keras learning phase at `call` time to decide what to
do.
**overrides: batch normalization construction args to override from the
batch_norm hyperparams dictionary.
Returns: Either a FreezableBatchNorm layer (if use_batch_norm() is True),
or a Keras Lambda layer that applies the identity (if use_batch_norm()
is False)
"""
if self.use_batch_norm():
return freezable_batch_norm.FreezableBatchNorm(
training=training,
**self.batch_norm_params(**overrides)
)
else:
return tf.keras.layers.Lambda(tf.identity)
def build_activation_layer(self, name='activation'):
"""Returns a Keras layer that applies the desired activation function.
Args:
name: The name to assign the Keras layer.
Returns: A Keras lambda layer that applies the activation function
specified in the hyperparam config, or applies the identity if the
activation function is None.
"""
if self._activation_fn:
return tf.keras.layers.Lambda(self._activation_fn, name=name)
else:
return tf.keras.layers.Lambda(tf.identity, name=name)
def params(self, include_activation=False, **overrides):
"""Returns a dict containing the layer construction hyperparameters to use.
Optionally overrides values in the returned dict. Overrides
only apply to individual calls of this method, and do not affect
future calls.
Args:
include_activation: If False, activation in the returned dictionary will
be set to `None`, and the activation must be applied via a separate
layer created by `build_activation_layer`. If True, `activation` in the
output param dictionary will be set to the activation function
specified in the hyperparams config.
**overrides: keyword arguments to override in the hyperparams dictionary.
Returns: dict containing the layer construction keyword arguments, with
values overridden by the `overrides` keyword arguments.
"""
new_params = self._op_params.copy()
new_params['activation'] = None
if include_activation:
new_params['activation'] = self._activation_fn
if self.use_batch_norm() and self.batch_norm_params()['center']:
new_params['use_bias'] = False
else:
new_params['use_bias'] = True
new_params.update(**overrides)
return new_params
def build(hyperparams_config, is_training):
"""Builds tf-slim arg_scope for convolution ops based on the config.
Returns an arg_scope to use for convolution ops containing weights
initializer, weights regularizer, activation function, batch norm function
and batch norm parameters based on the configuration.
Note that if no normalization parameters are specified in the config,
(i.e. left to default) then both batch norm and group norm are excluded
from the arg_scope.
The batch norm parameters are set for updates based on `is_training` argument
and conv_hyperparams_config.batch_norm.train parameter. During training, they
are updated only if batch_norm.train parameter is true. However, during eval,
no updates are made to the batch norm variables. In both cases, their current
values are used during forward pass.
Args:
hyperparams_config: hyperparams.proto object containing
hyperparameters.
is_training: Whether the network is in training mode.
Returns:
arg_scope_fn: A function to construct tf-slim arg_scope containing
hyperparameters for ops.
Raises:
ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
"""
if not isinstance(hyperparams_config,
hyperparams_pb2.Hyperparams):
raise ValueError('hyperparams_config not of type '
'hyperparams_pb.Hyperparams.')
normalizer_fn = None
batch_norm_params = None
if hyperparams_config.HasField('batch_norm'):
normalizer_fn = slim.batch_norm
batch_norm_params = _build_batch_norm_params(
hyperparams_config.batch_norm, is_training)
if hyperparams_config.HasField('group_norm'):
normalizer_fn = tf.contrib.layers.group_norm
affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
if hyperparams_config.HasField('op') and (
hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
affected_ops = [slim.fully_connected]
def scope_fn():
with (slim.arg_scope([slim.batch_norm], **batch_norm_params)
if batch_norm_params is not None else
context_manager.IdentityContextManager()):
with slim.arg_scope(
affected_ops,
weights_regularizer=_build_slim_regularizer(
hyperparams_config.regularizer),
weights_initializer=_build_initializer(
hyperparams_config.initializer),
activation_fn=_build_activation_fn(hyperparams_config.activation),
normalizer_fn=normalizer_fn) as sc:
return sc
return scope_fn
def _build_activation_fn(activation_fn):
"""Builds a callable activation from config.
Args:
activation_fn: hyperparams_pb2.Hyperparams.activation
Returns:
Callable activation function.
Raises:
ValueError: On unknown activation function.
"""
if activation_fn == hyperparams_pb2.Hyperparams.NONE:
return None
if activation_fn == hyperparams_pb2.Hyperparams.RELU:
return tf.nn.relu
if activation_fn == hyperparams_pb2.Hyperparams.RELU_6:
return tf.nn.relu6
raise ValueError('Unknown activation function: {}'.format(activation_fn))
def _build_slim_regularizer(regularizer):
"""Builds a tf-slim regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
tf-slim regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
if regularizer_oneof is None:
return None
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
def _build_keras_regularizer(regularizer):
"""Builds a keras regularizer from config.
Args:
regularizer: hyperparams_pb2.Hyperparams.regularizer proto.
Returns:
Keras regularizer.
Raises:
ValueError: On unknown regularizer.
"""
regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
if regularizer_oneof == 'l1_regularizer':
return tf.keras.regularizers.l1(float(regularizer.l1_regularizer.weight))
if regularizer_oneof == 'l2_regularizer':
# The Keras L2 regularizer weight differs from the Slim L2 regularizer
# weight by a factor of 2
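    # e.g. (illustrative) a Slim l2 weight of 1e-4 corresponds to a Keras l2 weight of 5e-5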
return tf.keras.regularizers.l2(
float(regularizer.l2_regularizer.weight * 0.5))
raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
def _build_initializer(initializer, build_for_keras=False):
"""Build a tf initializer from config.
Args:
initializer: hyperparams_pb2.Hyperparams.regularizer proto.
build_for_keras: Whether the initializers should be built for Keras
operators. If false builds for Slim.
Returns:
tf initializer.
Raises:
ValueError: On unknown initializer.
"""
initializer_oneof = initializer.WhichOneof('initializer_oneof')
if initializer_oneof == 'truncated_normal_initializer':
return tf.truncated_normal_initializer(
mean=initializer.truncated_normal_initializer.mean,
stddev=initializer.truncated_normal_initializer.stddev)
if initializer_oneof == 'random_normal_initializer':
return tf.random_normal_initializer(
mean=initializer.random_normal_initializer.mean,
stddev=initializer.random_normal_initializer.stddev)
if initializer_oneof == 'variance_scaling_initializer':
enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
DESCRIPTOR.enum_types_by_name['Mode'])
mode = enum_descriptor.values_by_number[initializer.
variance_scaling_initializer.
mode].name
if build_for_keras:
if initializer.variance_scaling_initializer.uniform:
return tf.variance_scaling_initializer(
scale=initializer.variance_scaling_initializer.factor,
mode=mode.lower(),
distribution='uniform')
else:
# In TF 1.9 release and earlier, the truncated_normal distribution was
# not supported correctly. So, in these earlier versions of tensorflow,
# the ValueError will be raised, and we manually truncate the
# distribution scale.
#
# It is insufficient to just set distribution to `normal` from the
# start, because the `normal` distribution in newer Tensorflow versions
# creates a truncated distribution, whereas it created untruncated
# distributions in older versions.
try:
return tf.variance_scaling_initializer(
scale=initializer.variance_scaling_initializer.factor,
mode=mode.lower(),
distribution='truncated_normal')
except ValueError:
truncate_constant = 0.87962566103423978
truncated_scale = initializer.variance_scaling_initializer.factor / (
truncate_constant * truncate_constant
)
return tf.variance_scaling_initializer(
scale=truncated_scale,
mode=mode.lower(),
distribution='normal')
else:
return slim.variance_scaling_initializer(
factor=initializer.variance_scaling_initializer.factor,
mode=mode,
uniform=initializer.variance_scaling_initializer.uniform)
raise ValueError('Unknown initializer function: {}'.format(
initializer_oneof))
def _build_batch_norm_params(batch_norm, is_training):
"""Build a dictionary of batch_norm params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
    is_training: Whether the model is in training mode.
Returns:
A dictionary containing batch_norm parameters.
"""
batch_norm_params = {
'decay': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
# Remove is_training parameter from here and deprecate it in the proto
# once we refactor Faster RCNN models to set is_training through an outer
# arg_scope in the meta architecture.
'is_training': is_training and batch_norm.train,
}
return batch_norm_params
def _build_keras_batch_norm_params(batch_norm):
"""Build a dictionary of Keras BatchNormalization params from config.
Args:
batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
Returns:
A dictionary containing Keras BatchNormalization parameters.
"""
# Note: Although decay is defined to be 1 - momentum in batch_norm,
# decay in the slim batch_norm layers was erroneously defined and is
# actually the same as momentum in the Keras batch_norm layers.
# For context, see: github.com/keras-team/keras/issues/6839
batch_norm_params = {
'momentum': batch_norm.decay,
'center': batch_norm.center,
'scale': batch_norm.scale,
'epsilon': batch_norm.epsilon,
}
return batch_norm_params
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | dropoutGenerator | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_DROPOUTGENERATOR_H
#define TT2I_DROPOUTGENERATOR_H
#include "random.h"
#include "cudaMemory.h"
#include "cuda_runtime.h"
namespace tts
{
class DropoutGenerator
{
public:
/**
* @brief Create a new dropout generator.
*
* @param maxBatchSize The maximum batch size.
* @param maxChunkSize The maximum number of chunks to generate at once.
* @param numValues The number of values to generate dropouts for.
* @param prob The probability with which to drop values.
* @param seed The seed to use for the random number generator.
*/
DropoutGenerator(int maxBatchSize, int maxChunkSize, int numValues, float prob, unsigned int seed = 0);
/**
* @brief Reset the random number generator.
*
* @param seed The seed to use.
* @param stream The stream to use.
*/
void reset(unsigned int seed, cudaStream_t stream);
/**
* @brief Generate a new set of dropout values.
*
* @param batchSize The size of the batch.
* @param numChunks The number of chunks to generate.
* @param stream The stream to generate in.
*/
void generate(int batchSize, int numChunks, cudaStream_t stream);
/**
* @brief Get a pointer to the device memory containing the dropout values.
* This memory is changed when `generate()` is called.
*
* @param chunk The chunk of dropouts to get.
*
* @return The memory location.
*/
const float* get(int chunk) const;
/**
* @brief Get the number of values generated with each call to `generate()`.
*
     * @return The number of values generated per call.
*/
int size() const
{
return mNumValues;
}
private:
float mProb;
int mNumValues;
int mMaxChunkSize;
int mGeneratedChunks;
int mBatchSize;
CudaMemory<float> mDropoutDevice;
Random mRand;
};
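/*
 * Illustrative usage sketch (not part of the original documentation):
 *   DropoutGenerator dropout(maxBatchSize, maxChunkSize, numValues, 0.5f);
 *   dropout.generate(batchSize, numChunks, stream);
 *   const float* mask = dropout.get(0);
 */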
} // namespace tts
#endif
|
PyTorch/Translation/GNMT/seq2seq/train | train | trainer | # Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
import time
from itertools import cycle
import numpy as np
import torch
import torch.optim
import torch.utils.data
from apex.parallel import DistributedDataParallel
from apex import amp
from seq2seq.train.fp_optimizers import FP16Optimizer
from seq2seq.train.fp_optimizers import FP32Optimizer
from seq2seq.train.fp_optimizers import AMPOptimizer
from seq2seq.train.lr_scheduler import WarmupMultiStepLR
from seq2seq.utils import AverageMeter
from seq2seq.utils import sync_workers
class Seq2SeqTrainer:
"""
Seq2SeqTrainer
"""
def __init__(self,
model,
criterion,
opt_config,
scheduler_config,
print_freq=10,
save_freq=1000,
grad_clip=float('inf'),
save_info={},
save_dir='.',
train_iterations=0,
checkpoint_filename='checkpoint%s.pth',
keep_checkpoints=5,
math='fp32',
loss_scaling={},
intra_epoch_eval=0,
prealloc_mode='always',
warmup=0,
iter_size=1,
translator=None,
verbose=False):
"""
Constructor for the Seq2SeqTrainer.
:param model: model to train
:param criterion: criterion (loss function)
:param opt_config: dictionary with options for the optimizer
:param scheduler_config: dictionary with options for the learning rate
scheduler
:param print_freq: prints short summary every 'print_freq' iterations
:param save_freq: saves checkpoint every 'save_freq' iterations
:param grad_clip: coefficient for gradient clipping
:param save_info: dict with additional state stored in each checkpoint
        :param save_dir: path to the directory for checkpoints
:param train_iterations: total number of training iterations to execute
:param checkpoint_filename: name of files with checkpoints
:param keep_checkpoints: max number of checkpoints to keep
:param math: arithmetic type
:param loss_scaling: options for dynamic loss scaling
:param intra_epoch_eval: number of additional eval runs within each
training epoch
:param prealloc_mode: controls preallocation,
choices=['off', 'once', 'always']
:param warmup: number of warmup iterations for performance counters
:param iter_size: number of iterations between weight updates
:param translator: instance of Translator, runs inference on test set
:param verbose: enables verbose logging
"""
super(Seq2SeqTrainer, self).__init__()
self.model = model
self.criterion = criterion
self.epoch = 0
self.save_info = save_info
self.save_dir = save_dir
self.save_freq = save_freq
self.save_counter = 0
self.checkpoint_filename = checkpoint_filename
self.checkpoint_counter = cycle(range(keep_checkpoints))
self.opt_config = opt_config
self.device = next(model.parameters()).device
self.print_freq = print_freq
self.verbose = verbose
self.loss = None
self.translator = translator
self.intra_epoch_eval = intra_epoch_eval
self.warmup = warmup
self.iter_size = iter_size
self.prealloc_mode = prealloc_mode
self.preallocated = False
self.distributed = torch.distributed.is_initialized()
self.batch_first = model.batch_first
params = self.model.parameters()
if math == 'manual_fp16':
self.fp_optimizer = FP16Optimizer(
self.model, grad_clip,
loss_scale=loss_scaling['init_scale'],
dls_upscale_interval=loss_scaling['upscale_interval']
)
params = self.fp_optimizer.fp32_params
elif math == 'fp32' or math == 'tf32':
self.fp_optimizer = FP32Optimizer(self.model, grad_clip)
opt_name = opt_config.pop('optimizer')
self.optimizer = torch.optim.__dict__[opt_name](params, **opt_config)
logging.info(f'Using optimizer: {self.optimizer}')
self.scheduler = WarmupMultiStepLR(self.optimizer, train_iterations,
**scheduler_config)
if math == 'fp16':
self.model, self.optimizer = amp.initialize(
self.model,
self.optimizer,
cast_model_outputs=torch.float16,
keep_batchnorm_fp32=False,
opt_level='O2')
self.fp_optimizer = AMPOptimizer(
self.model,
grad_clip,
loss_scale=loss_scaling['init_scale'],
dls_upscale_interval=loss_scaling['upscale_interval']
)
if self.distributed:
self.model = DistributedDataParallel(self.model)
def iterate(self, src, tgt, update=True, training=True):
"""
Performs one iteration of the training/validation.
:param src: batch of examples from the source language
:param tgt: batch of examples from the target language
:param update: if True: optimizer does update of the weights
:param training: if True: executes optimizer
"""
src, src_length = src
tgt, tgt_length = tgt
src = src.to(self.device)
tgt = tgt.to(self.device)
src_length = src_length.to(self.device)
num_toks = {}
num_toks['tgt'] = int(sum(tgt_length - 1))
num_toks['src'] = int(sum(src_length))
if self.batch_first:
output = self.model(src, src_length, tgt[:, :-1])
tgt_labels = tgt[:, 1:]
T, B = output.size(1), output.size(0)
else:
output = self.model(src, src_length, tgt[:-1])
tgt_labels = tgt[1:]
T, B = output.size(0), output.size(1)
loss = self.criterion(output.view(T * B, -1),
tgt_labels.contiguous().view(-1))
loss_per_batch = loss.item()
loss /= (B * self.iter_size)
if training:
self.fp_optimizer.step(loss, self.optimizer, self.scheduler,
update)
loss_per_token = loss_per_batch / num_toks['tgt']
loss_per_sentence = loss_per_batch / B
return loss_per_token, loss_per_sentence, num_toks
def feed_data(self, data_loader, training=True):
"""
Runs training or validation on batches from data_loader.
:param data_loader: data loader
:param training: if True runs training else runs validation
"""
if training:
assert self.optimizer is not None
eval_fractions = np.linspace(0, 1, self.intra_epoch_eval+2)[1:-1]
iters_with_update = len(data_loader) // self.iter_size
eval_iters = (eval_fractions * iters_with_update).astype(int)
eval_iters = eval_iters * self.iter_size
eval_iters = set(eval_iters)
batch_time = AverageMeter(self.warmup)
data_time = AverageMeter(self.warmup)
losses_per_token = AverageMeter()
losses_per_sentence = AverageMeter()
tot_tok_time = AverageMeter(self.warmup)
src_tok_time = AverageMeter(self.warmup)
tgt_tok_time = AverageMeter(self.warmup)
batch_size = data_loader.batch_size
if self.device.type == 'cuda':
torch.cuda.synchronize()
end = time.time()
for i, (src, tgt) in enumerate(data_loader):
self.save_counter += 1
# measure data loading time
data_time.update(time.time() - end)
update = False
if i % self.iter_size == self.iter_size - 1:
update = True
# do a train/evaluate iteration
stats = self.iterate(src, tgt, update, training=training)
loss_per_token, loss_per_sentence, num_toks = stats
# measure accuracy and record loss
losses_per_token.update(loss_per_token, num_toks['tgt'])
losses_per_sentence.update(loss_per_sentence, batch_size)
# measure elapsed time
if self.device.type == 'cuda':
torch.cuda.synchronize()
elapsed = time.time() - end
batch_time.update(elapsed)
src_tok_time.update(num_toks['src'] / elapsed, elapsed)
tgt_tok_time.update(num_toks['tgt'] / elapsed, elapsed)
tot_num_toks = num_toks['tgt'] + num_toks['src']
tot_tok_time.update(tot_num_toks / elapsed, elapsed)
self.loss = losses_per_token.avg
if training and i in eval_iters:
eval_fname = f'eval_epoch_{self.epoch}_iter_{i}'
eval_path = os.path.join(self.save_dir, eval_fname)
_, eval_stats = self.translator.run(
calc_bleu=True,
epoch=self.epoch,
iteration=i,
eval_path=eval_path,
)
test_bleu = eval_stats['bleu']
log = []
log += [f'TRAIN [{self.epoch}][{i}/{len(data_loader)}]']
log += [f'BLEU: {test_bleu:.2f}']
log = '\t'.join(log)
logging.info(log)
self.model.train()
self.preallocate(data_loader.batch_size,
data_loader.dataset.max_len, training=True)
if i % self.print_freq == 0:
phase = 'TRAIN' if training else 'VALIDATION'
log = []
log += [f'{phase} [{self.epoch}][{i}/{len(data_loader)}]']
log += [f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})']
log += [f'Data {data_time.val:.2e} ({data_time.avg:.2e})']
log += [f'Tok/s {tot_tok_time.val:.0f} ({tot_tok_time.avg:.0f})']
if self.verbose:
log += [f'Src tok/s {src_tok_time.val:.0f} ({src_tok_time.avg:.0f})']
log += [f'Tgt tok/s {tgt_tok_time.val:.0f} ({tgt_tok_time.avg:.0f})']
log += [f'Loss/sentence {losses_per_sentence.val:.1f} ({losses_per_sentence.avg:.1f})']
log += [f'Loss/tok {losses_per_token.val:.4f} ({losses_per_token.avg:.4f})']
if training:
lr = self.optimizer.param_groups[0]['lr']
log += [f'LR {lr:.3e}']
log = '\t'.join(log)
logging.info(log)
save_chkpt = (self.save_counter % self.save_freq) == (self.save_freq - 1)
if training and save_chkpt:
self.save_counter = 0
self.save_info['iteration'] = i
identifier = next(self.checkpoint_counter, -1)
if identifier != -1:
with sync_workers() as rank:
if rank == 0:
self.save(identifier=identifier)
if self.device.type == 'cuda':
torch.cuda.synchronize()
end = time.time()
tot_tok_time.reduce('sum')
losses_per_token.reduce('mean')
return losses_per_token.avg, tot_tok_time.avg
def preallocate(self, batch_size, max_length, training):
"""
Generates maximum sequence length batch and runs forward and backward
pass without updating model parameters.
:param batch_size: batch size for preallocation
:param max_length: max sequence length for preallocation
:param training: if True preallocates memory for backward pass
"""
if self.prealloc_mode == 'always' or (self.prealloc_mode == 'once' and
not self.preallocated):
logging.info('Executing preallocation')
torch.cuda.empty_cache()
src_length = torch.full((batch_size,), max_length,
dtype=torch.int64)
tgt_length = torch.full((batch_size,), max_length,
dtype=torch.int64)
if self.batch_first:
shape = (batch_size, max_length)
else:
shape = (max_length, batch_size)
src = torch.full(shape, 4, dtype=torch.int64)
tgt = torch.full(shape, 4, dtype=torch.int64)
src = src, src_length
tgt = tgt, tgt_length
self.iterate(src, tgt, update=False, training=training)
self.model.zero_grad()
self.preallocated = True
def optimize(self, data_loader):
"""
Sets model in training mode, preallocates memory and runs training on
data provided by data_loader.
:param data_loader: data loader
"""
torch.set_grad_enabled(True)
self.model.train()
self.preallocate(data_loader.batch_size, data_loader.dataset.max_len,
training=True)
output = self.feed_data(data_loader, training=True)
self.model.zero_grad()
return output
def evaluate(self, data_loader):
"""
Sets model in eval mode, disables gradients, preallocates memory and
runs validation on data provided by data_loader.
:param data_loader: data loader
"""
torch.set_grad_enabled(False)
self.model.eval()
self.preallocate(data_loader.batch_size, data_loader.dataset.max_len,
training=False)
output = self.feed_data(data_loader, training=False)
self.model.zero_grad()
return output
def load(self, filename):
"""
Loads checkpoint from filename.
:param filename: path to the checkpoint file
"""
if os.path.isfile(filename):
checkpoint = torch.load(filename, map_location={'cuda:0': 'cpu'})
if self.distributed:
self.model.module.load_state_dict(checkpoint['state_dict'])
else:
self.model.load_state_dict(checkpoint['state_dict'])
self.fp_optimizer.initialize_model(self.model)
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.scheduler.load_state_dict(checkpoint['scheduler'])
self.epoch = checkpoint['epoch']
self.loss = checkpoint['loss']
logging.info(f'Loaded checkpoint {filename} (epoch {self.epoch})')
else:
logging.error(f'Invalid checkpoint: {filename}')
def save(self, identifier=None, is_best=False, save_all=False):
"""
Stores checkpoint to a file.
:param identifier: identifier for periodic checkpoint
:param is_best: if True stores checkpoint to 'model_best.pth'
:param save_all: if True stores checkpoint after completed training
epoch
"""
def write_checkpoint(state, filename):
filename = os.path.join(self.save_dir, filename)
logging.info(f'Saving model to {filename}')
torch.save(state, filename)
if self.distributed:
model_state = self.model.module.state_dict()
else:
model_state = self.model.state_dict()
state = {
'epoch': self.epoch,
'state_dict': model_state,
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'loss': getattr(self, 'loss', None),
}
state = dict(list(state.items()) + list(self.save_info.items()))
if identifier is not None:
filename = self.checkpoint_filename % identifier
write_checkpoint(state, filename)
if is_best:
filename = 'model_best.pth'
write_checkpoint(state, filename)
if save_all:
filename = f'checkpoint_epoch_{self.epoch:03d}.pth'
write_checkpoint(state, filename)
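# Illustrative usage sketch (the real entry point is train.py; `trainer`,
# `train_loader`, `val_loader`, `num_epochs` and `best_loss` are assumed to be
# set up elsewhere):
#
#   for epoch in range(num_epochs):
#       trainer.epoch = epoch
#       train_loss, train_tok_per_sec = trainer.optimize(train_loader)
#       val_loss, _ = trainer.evaluate(val_loader)
#       trainer.save(identifier=epoch, is_best=(val_loss < best_loss))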
|
PyTorch/LanguageModeling/BERT/distillation/utils/squad | squad | squad_metrics | # coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was
modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import collections
import json
import logging
import math
import re
import string
import time
import tqdm
import os
import torch
from tokenization import BasicTokenizer
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
logger = logging.getLogger(__name__)
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
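# Minimal usage sketch (added for illustration; not used by the evaluation script):
def _example_normalize_answer():
    # Casing, punctuation, articles and extra whitespace are all stripped before
    # answers are compared.
    assert normalize_answer("The  Eiffel Tower!") == "eiffel tower"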
def get_tokens(s):
if not s:
return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
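# Minimal usage sketch (added for illustration; not used by the evaluation script):
def _example_exact_and_f1():
    # Scores are computed on bags of normalized tokens, so "The Eiffel Tower" and
    # "eiffel tower" match exactly, while a partial overlap yields the harmonic
    # mean of token precision and recall.
    assert compute_exact("The Eiffel Tower", "eiffel tower") == 1
    assert abs(compute_f1("eiffel tower", "the tower") - 2.0 / 3.0) < 1e-6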
def get_raw_scores(examples, preds):
"""
Computes the exact and f1 scores from the examples and the model predictions
"""
exact_scores = {}
f1_scores = {}
for example in examples:
qas_id = example.qas_id
gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = [""]
if qas_id not in preds:
print("Missing prediction for %s" % qas_id)
continue
prediction = preds[qas_id]
exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)
return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(f1_scores.values()) / total),
("total", total),
]
)
else:
total = len(qid_list)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
("total", total),
]
)
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
has_ans_score, has_ans_cnt = 0, 0
for qid in qid_list:
if not qid_to_has_ans[qid]:
continue
has_ans_cnt += 1
if qid not in scores:
continue
has_ans_score += scores[qid]
return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval["best_exact"] = best_exact
main_eval["best_exact_thresh"] = exact_thresh
main_eval["best_f1"] = best_f1
main_eval["best_f1_thresh"] = f1_thresh
main_eval["has_ans_exact"] = has_ans_exact
main_eval["has_ans_f1"] = has_ans_f1
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for _, qid in enumerate(qid_list):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval["best_exact"] = best_exact
main_eval["best_exact_thresh"] = exact_thresh
main_eval["best_f1"] = best_f1
main_eval["best_f1_thresh"] = f1_thresh
def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0):
qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer]
no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer]
if no_answer_probs is None:
no_answer_probs = {k: 0.0 for k in preds}
exact, f1 = get_raw_scores(examples, preds)
exact_threshold = apply_no_ans_threshold(
exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
)
f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold)
evaluation = make_eval_dict(exact_threshold, f1_threshold)
if has_answer_qids:
has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids)
merge_eval(evaluation, has_ans_eval, "HasAns")
if no_answer_qids:
no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids)
merge_eval(evaluation, no_ans_eval, "NoAns")
if no_answer_probs:
find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer)
return evaluation
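# Minimal usage sketch (added for illustration; real examples come from the SQuAD
# preprocessing pipeline and carry many more fields):
def _example_squad_evaluate():
    FakeExample = collections.namedtuple("FakeExample", ["qas_id", "answers"])
    examples = [FakeExample("q1", [{"text": "Denver Broncos"}])]
    results = squad_evaluate(examples, {"q1": "the Denver Broncos"})
    assert results["exact"] == 100.0 and results["f1"] == 100.0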
def compute_predictions(
all_examples,
all_features,
all_results,
args,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
):
answers, nbest_answers = get_answers(all_examples, all_features, all_results, args)
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(answers, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(nbest_answers, indent=4) + "\n")
# if args.version_2_with_negative:
# with open(output_null_log_odds_file, "w") as writer:
# writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return answers
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def get_answers(examples, features, results, args):
predictions = collections.defaultdict(list) # it is possible that one example corresponds to multiple features
_Prediction = collections.namedtuple('_Prediction', ['text', 'start_logit', 'end_logit'])
if args.version_2_with_negative:
null_vals = collections.defaultdict(lambda: (float("inf"), 0, 0))
for ex, feat, result in match_results(examples, features, results):
if not args.joint_prediction:
start_indices = _get_best_indices(result.start_logits, args.n_best_size)
end_indices = _get_best_indices(result.end_logits, args.n_best_size)
prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices, feat, result, args)
feature_null_score = result.start_logits[0] + result.end_logits[0]
else:
prelim_predictions = get_valid_prelim_predictions_joint_head(result.start_top_index, result.end_top_index,
feat, result, args)
# start_indices = result.start_top_index
# end_indices = result.end_top_index
feature_null_score = result.cls_logits
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
if args.version_2_with_negative and feature_null_score < null_vals[ex.qas_id][0]:
null_vals[ex.qas_id] = (feature_null_score, result.start_logits[0], result.end_logits[0])
curr_predictions = []
seen_predictions = set()
for pred in prelim_predictions:
if len(curr_predictions) == args.n_best_size:
break
if pred.start_index > 0:
final_text = get_answer_text(ex, feat, pred, args)
else:
final_text = ''
if final_text in seen_predictions:
continue
seen_predictions.add(final_text)
curr_predictions.append(_Prediction(final_text, pred.start_logit, pred.end_logit))
predictions[ex.qas_id] += curr_predictions
# Add empty prediction
if args.version_2_with_negative:
for qas_id in predictions.keys():
predictions[qas_id].append(_Prediction('',
null_vals[qas_id][1],
null_vals[qas_id][2]))
nbest_answers = collections.defaultdict(list)
answers = {}
for qas_id, preds in predictions.items():
seen_predictions = set()
nbest = []
for pred in sorted(predictions[qas_id], key=lambda x: (x.start_logit + x.end_logit), reverse=True):
if len(nbest) >= args.n_best_size:
break
if pred.text in seen_predictions:
continue
seen_predictions.add(pred.text)
nbest.append(pred)
# In very rare edge cases we could only have single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if not nbest or (args.version_2_with_negative and len(nbest) == 1):
nbest.append(_Prediction(text="empty", start_logit=0.0, end_logit=0.0))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry and entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_answers[qas_id].append(output)
if args.version_2_with_negative:
if not args.joint_prediction:
score_diff = null_vals[qas_id][0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit
else:
score_diff = null_vals[qas_id][0]
if score_diff > args.null_score_diff_threshold:
answers[qas_id] = ""
else:
answers[qas_id] = best_non_null_entry.text
else:
answers[qas_id] = nbest_answers[qas_id][0]['text']
return answers, nbest_answers
def get_answer_text(example, feature, pred, args):
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, args.do_lower_case, args.verbose_logging)
return final_text
def get_valid_prelim_predictions_joint_head(start_indices, end_indices, feature, result, args):
_PrelimPrediction = collections.namedtuple(
"PrelimPrediction",
["start_index", "end_index", "start_logit", "end_logit"])
prelim_predictions = []
# for start_index in start_indices:
for i in range(args.beam_size):
start_index = start_indices[i]
for j in range(args.beam_size):
# for end_index in end_indices:
end_index = end_indices[i * args.beam_size + j]
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > args.max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[i], # start_index],
end_logit=result.end_logits[i * args.beam_size + j])) # end_index]))
return prelim_predictions
def get_valid_prelim_predictions(start_indices, end_indices, feature, result, args):
_PrelimPrediction = collections.namedtuple(
"PrelimPrediction",
["start_index", "end_index", "start_logit", "end_logit"])
prelim_predictions = []
for start_index in start_indices:
for end_index in end_indices:
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > args.max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
return prelim_predictions
def match_results(examples, features, results):
unique_f_ids = set([f.unique_id for f in features])
unique_r_ids = set([r.unique_id for r in results])
matching_ids = unique_f_ids & unique_r_ids
features = [f for f in features if f.unique_id in matching_ids]
results = [r for r in results if r.unique_id in matching_ids]
features.sort(key=lambda x: x.unique_id)
results.sort(key=lambda x: x.unique_id)
for f, r in zip(features, results): # original code assumes strict ordering of examples. TODO: rewrite this
yield examples[f.example_index], f, r
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indices(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indices = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indices.append(index_and_score[i][0])
return best_indices
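# Minimal usage sketch (added for illustration; not used by the evaluation script):
def _example_best_indices():
    # Returns the indices of the n largest logits, ordered from best to worst.
    assert _get_best_indices([0.1, 2.5, 1.0, 3.0], n_best_size=2) == [3, 1]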
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
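# Minimal usage sketch (added for illustration; not used by the evaluation script):
def _example_compute_softmax():
    # The max score is subtracted before exponentiation, so the helper stays
    # numerically stable even for large logits.
    probs = _compute_softmax([1000.0, 1001.0, 1002.0])
    assert abs(sum(probs) - 1.0) < 1e-6 and probs[2] == max(probs)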
def to_list(tensor):
return tensor.detach().cpu().tolist()
def evaluate(args, model, dataset, examples, features, prefix=""):
# if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
# os.makedirs(args.output_dir)
    args.eval_batch_size = args.train_batch_size  # args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
# if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
# model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_results = []
    start_time = time.time()  # timeit.default_timer()
# for batch in tqdm(eval_dataloader, desc="Evaluating"):
for batch in eval_dataloader:
# for batch in eval_dataloader:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
#"cls_index": batch[4],
#"p_mask": batch[5],
#"eval": True,
}
feature_indices = batch[3]
with torch.cuda.amp.autocast(enabled=args.amp):
outputs = model(**inputs)
for i, feature_index in enumerate(feature_indices):
eval_feature = features[feature_index.item()]
unique_id = int(eval_feature.unique_id)
output = [to_list(output[i]) for output in outputs]
# Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler"
# models only use two.
if len(output) >= 5:
start_logits = output[0]
start_top_index = output[1]
end_logits = output[2]
end_top_index = output[3]
cls_logits = output[4]
result = SquadResult(
unique_id,
start_logits,
end_logits,
start_top_index=start_top_index,
end_top_index=end_top_index,
cls_logits=cls_logits, )
else:
start_logits, end_logits = output
result = RawResult(unique_id, start_logits, end_logits)
all_results.append(result)
eval_time = time.time() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", eval_time, eval_time / len(dataset))
# Compute predictions
output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix))
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix))
if args.version_2_with_negative:
output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix))
else:
output_null_log_odds_file = None
# start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top
# end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top
predictions = compute_predictions(
examples,
features,
all_results,
args,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
)
# Compute the F1 and exact scores.
results = squad_evaluate(examples, predictions)
results["acc"] = results["f1"]
return results
|
PyTorch/LanguageModeling/BERT/triton/runner | runner | executor | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import shutil
import traceback
from typing import Dict, List, Optional
from colorama import Fore
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.core import Accelerator, Precision
from .core import Paths
from .exceptions import RunnerException
from .experiment import ExperimentResult, ExperimentStatus, Status
from .exporter import CommandsExporter
from .logger import LOGGER
from .maintainer import Container, Maintainer
from .pipeline import Pipeline
from .stages import Stage
from .task import Experiment, Task
from .triton import Triton
from .utils import clean_directory, exec_command, format_env_key, format_env_value, get_result_path
class Executor:
"""
Experiments executor
"""
def __init__(
self,
workspace: pathlib.Path,
maintainer: Maintainer,
pipeline: Pipeline,
devices: List[str] = None,
):
"""
Initialize experiments executor
Args:
workspace: Path to workspace to store artifacts
maintainer: maintainer for running commands
pipeline: pipeline definition
devices: List of devices on which Triton Inference Server will be executed
"""
self._maintainer = maintainer
self._pipeline = pipeline
self._devices = devices or ["0"]
self._workspace = workspace
self._executor_workspace = workspace / "executor"
self._shared_dir = self._executor_workspace / "shared"
self._triton_models_repository_dir = self._executor_workspace / "triton_models"
self._scripts_dir = self._executor_workspace / "scripts"
self._libraries_dir = self._executor_workspace / "libs"
self._exporter = CommandsExporter(self._scripts_dir)
self._triton_container: Optional[Container] = None
def start(self, task: Task):
"""
Process the task and execute experiments.
"""
self._create_dirs()
total_experiment = len(task.experiments)
LOGGER.info(f"Total experiments to verify: {total_experiment}")
for idx, experiment in enumerate(task.experiments, start=1):
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Started ================{Fore.RESET}"
)
results = {}
environment = self._prepare_environment(task, experiment.parameters)
LOGGER.info(f"Experiment details")
LOGGER.info(json.dumps(environment, indent=4))
self._clean_experiment_artifacts(idx, total_experiment)
self._create_experiment_results_dir(task, experiment)
experiment.start()
LOGGER.info("Running Triton Servers:")
log_file = self._workspace / task.logs_dir / f"triton-server-experiment-{idx}.log"
self._triton_container = self._triton_server_container(
triton_container_image=task.triton_container_image,
framework=task.framework,
accelerator=experiment.parameters["accelerator"],
precision=experiment.parameters["precision"],
custom_library=bool(task.triton_custom_operations is not None),
load_model_method=task.triton_load_model_method,
log_file=log_file,
)
try:
self._triton_container.start()
for stage in self._pipeline.stages():
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Started ================{Fore.RESET}"
)
experiment_stage = experiment.stages[stage.label]
experiment_stage.start()
is_ok = self._run_stage(stage=stage)
if not is_ok:
LOGGER.error(f"Stage {stage.label} failed.")
break
self._save_results(task, experiment, stage.label, results)
experiment_stage.end()
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Finished ================{Fore.RESET}"
)
except Exception:
message = traceback.format_exc()
LOGGER.error(f"Error running experiment: {message}")
yield ExperimentResult(
status=Status(state=ExperimentStatus.FAILED, message=message),
experiment=experiment,
results=results,
)
finally:
self._triton_container.stop()
experiment.end()
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Finished ================{Fore.RESET}"
)
yield ExperimentResult(
status=Status(state=ExperimentStatus.SUCCEED, message="Experiment Succeed"),
experiment=experiment,
results=results,
)
def stop(self) -> None:
"""
Stop executor
Returns:
None
"""
if self._triton_container:
self._triton_container.stop()
def _prepare_environment(self, task: Task, parameters: Dict) -> Dict:
"""
Prepare environment data and export it
Args:
parameters: Key and values which should be exported to environment
Returns:
Dictionary with environment data
"""
environment = {
"MODEL_NAME": task.model_name,
"FRAMEWORK": task.framework,
"SHARED_DIR": self._shared_dir.as_posix(),
"MODEL_REPOSITORY_PATH": self._triton_models_repository_dir.as_posix(),
"TRITON_SERVER_URL": "localhost",
"TRITON_INSTANCES": "1",
"TRITON_LOAD_MODEL_METHOD": task.triton_load_model_method,
}
checkpoint_variant = parameters.get("checkpoint_variant")
if checkpoint_variant:
del parameters["checkpoint_variant"]
environment["CHECKPOINT_DIR"] = task.checkpoints[checkpoint_variant].path.as_posix()
if task.datasets_dir:
environment["DATASETS_DIR"] = task.datasets_dir.as_posix()
for key, value in parameters.items():
key = format_env_key(key)
value = format_env_value(value)
environment[key] = value
for key, value in environment.items():
os.environ[key] = value
return environment
def _triton_server_container(
self,
triton_container_image: str,
framework: str,
load_model_method: str,
accelerator: str,
precision: str,
log_file: pathlib.Path,
custom_library: bool,
) -> Container:
"""
Create Triton Inference Server container for experiment
Args:
triton_container_image: Triton Inference Server container image
framework: Framework used to run model
accelerator: Accelerator used for experiment
precision: Precision used for experiment
load_model_method: Configure how Triton will load model
log_file: File where Triton logs are stored
Returns:
Container object
"""
volumes = {
self._triton_models_repository_dir: {"bind": Paths.MODEL_REPOSITORY_PATH, "mode": "rw"},
self._libraries_dir: {"bind": Paths.LIBRARIES_PATH, "mode": "rw"},
}
environment = {
"MODEL_REPOSITORY_PATH": Paths.MODEL_REPOSITORY_PATH,
"LIBRARIES_PATH": Paths.LIBRARIES_PATH,
"TRITON_LOAD_MODEL_METHOD": load_model_method,
}
if custom_library:
library_path = Triton.library_path(framework=framework)
environment["LD_LIBRARY_PATH"] = f"{library_path}:${{LD_LIBRARY_PATH}}"
environment["LD_PRELOAD"] = Triton.custom_library_path_remote()
if accelerator == Accelerator.TRT.value and precision == Precision.FP16.value:
environment["ORT_TENSORRT_FP16_ENABLE"] = 1
strict_mode = False
command = Triton.command(
framework=framework,
repository_path=Paths.MODEL_REPOSITORY_PATH,
strict_mode=strict_mode,
)
command = f' bash -c "{command}"'
container = self._maintainer.triton_container(
command=command,
image=triton_container_image,
devices=self._devices,
volumes=volumes,
environment=environment,
log_file=log_file,
)
return container
def _save_results(self, task: Task, experiment: Experiment, stage_name: str, results: Dict) -> None:
"""
Update results for stage
Args:
task: Task object
experiment: Experiment for which stage has to be updated
stage_name: Name of stage
results: Results path mapping
Returns:
None
"""
stage = experiment.stages[stage_name]
if not stage.result_path:
LOGGER.debug(f"No results file to copy for {stage.name}")
return
if not stage.result_type:
LOGGER.debug(f"No results type provided for {stage.name}")
return
os.environ["SHARED_DIR"] = self._shared_dir.as_posix()
result_path = get_result_path(result_path=stage.result_path)
result_path = pathlib.Path(result_path)
if not result_path.is_file() and not result_path.is_dir():
raise RunnerException(f"Results file {result_path} not found.")
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
LOGGER.info(f"Saving {stage.result_type} to {experiment_dir}")
if result_path.is_dir():
dst_path = experiment_dir / stage.result_type
shutil.copytree(result_path, dst_path)
elif result_path.is_file():
suffix = result_path.suffix
dst_path = experiment_dir / f"{stage.result_type}{suffix}"
shutil.copy(result_path, dst_path)
else:
raise RunnerException(f"Result not found {result_path}")
LOGGER.info("Done")
results[stage.result_type] = dst_path
def _create_dirs(self) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
LOGGER.info(f"{Fore.GREEN}================ Creating Artifacts Directories Started ================{Fore.RESET}")
if self._executor_workspace.is_dir():
LOGGER.info(f"Removing previous executor workspace: {self._executor_workspace}")
shutil.rmtree(self._executor_workspace)
for directory in [
self._libraries_dir,
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
directory.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory.name} created.")
LOGGER.info(
f"{Fore.GREEN}================ Creating Artifacts Directories Finished ================{Fore.RESET}"
)
def _clean_experiment_artifacts(self, idx: int, total: int) -> None:
"""
Clean artifacts stored between experiments
Returns:
None
"""
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Started ================{Fore.RESET}"
)
for directory in [
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
clean_directory(directory)
LOGGER.info(f"Location {directory} cleaned.")
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Finished ================{Fore.RESET}"
)
def _create_experiment_results_dir(self, task: Task, experiment: Experiment):
"""
Create result directory for experiment
Returns:
"""
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
experiment_dir.mkdir(parents=True, exist_ok=True)
def _prepare_triton_custom_operations(self, task: Task) -> None:
"""
Prepare Triton Server custom operations library
Returns:
None
"""
if task.triton_custom_operations:
target_library_path = Triton.custom_library_path_local(self._libraries_dir)
target_library_path_dir = target_library_path.parent
target_library_path_dir.mkdir(parents=True, exist_ok=True)
shutil.copy(task.triton_custom_operations, target_library_path)
def _run_stage(self, stage: Stage) -> bool:
"""
Run single stage commands
Args:
stage: Stage object with defined commands
Returns:
True on success, False otherwise
"""
try:
command = self._exporter.export(stage=stage)
exec_command(command)
except RunnerException:
return False
return True
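# Illustrative wiring sketch (not part of the runner itself; `maintainer`,
# `pipeline` and `task` are built by the surrounding runner framework):
#
#   executor = Executor(workspace=pathlib.Path("runner_workspace"),
#                       maintainer=maintainer, pipeline=pipeline, devices=["0"])
#   for result in executor.start(task):
#       LOGGER.info(f"Experiment finished with state: {result.status.state}")
#   executor.stop()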
|
PyTorch/Classification/ConvNets | ConvNets | model2onnx | import argparse
import torch
import pytorch_quantization
from image_classification.models import (
resnet50,
resnext101_32x4d,
se_resnext101_32x4d,
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
efficientnet_quant_b0,
efficientnet_quant_b4,
)
def available_models():
models = {
m.name: m
for m in [
resnet50,
resnext101_32x4d,
se_resnext101_32x4d,
efficientnet_b0,
efficientnet_b4,
efficientnet_widese_b0,
efficientnet_widese_b4,
efficientnet_quant_b0,
efficientnet_quant_b4,
]
}
return models
def parse_args(parser):
"""
Parse commandline arguments.
"""
model_names = available_models().keys()
parser.add_argument("--arch", "-a", metavar="ARCH", default="resnet50", choices=model_names,
help="model architecture: " + " | ".join(model_names) + " (default: resnet50)")
parser.add_argument("--device", metavar="DEVICE", default="cuda", choices=['cpu', 'cuda'],
help="device on which model is settled: cpu, cuda (default: cuda)")
parser.add_argument("--image-size", default=None, type=int, help="resolution of image")
parser.add_argument('--output', type=str, help='Path to converted model')
parser.add_argument("-b", "--batch-size", default=256, type=int, metavar="N",
help="mini-batch size (default: 256) per gpu")
return parser
def final_name(base_name):
splitted = base_name.split('.')
if 'pt' in splitted:
fin_name = base_name.replace('pt', 'onnx')
elif 'pth' in splitted:
fin_name = base_name.replace('pth', 'onnx')
elif len(splitted) > 1:
fin_name = '.'.join(splitted[:-1] + ['onnx'])
else:
fin_name = base_name + '.onnx'
return fin_name
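# Minimal usage sketch (added for illustration; not called by the conversion script):
def _example_final_name():
    # Checkpoint file extensions are rewritten to .onnx, and a plain name simply
    # gains the .onnx suffix.
    assert final_name("model.pth") == "model.onnx"
    assert final_name("nvidia_resnet50") == "nvidia_resnet50.onnx"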
def get_dataloader(image_size, bs, num_classes):
"""return dataloader for inference"""
from image_classification.dataloaders import get_synthetic_loader
def data_loader():
loader, _ = get_synthetic_loader(None, image_size, bs, num_classes, False)
for inp, _ in loader:
yield inp
break
return data_loader()
def prepare_inputs(dataloader, device):
"""load sample inputs to device"""
inputs = []
for batch in dataloader:
if type(batch) is torch.Tensor:
batch_d = batch.to(device)
batch_d = (batch_d, )
inputs.append(batch_d)
else:
batch_d = []
for x in batch:
assert type(x) is torch.Tensor, "input is not a tensor"
batch_d.append(x.to(device))
batch_d = tuple(batch_d)
inputs.append(batch_d)
return inputs
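# Minimal usage sketch (added for illustration; not called by the conversion script):
def _example_prepare_inputs():
    # Single tensors are moved to the device and wrapped into one-element tuples
    # so every batch can later be unpacked uniformly.
    inputs = prepare_inputs([torch.zeros(2, 3)], device="cpu")
    assert isinstance(inputs[0], tuple) and inputs[0][0].shape == (2, 3)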
def check_quant_weight_correctness(checkpoint_path, model):
state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
state_dict = {k[len("module."):] if k.startswith("module.") else k: v for k, v in state_dict.items()}
quantizers_sd_keys = {f'{n[0]}._amax' for n in model.named_modules() if 'quantizer' in n[0]}
sd_all_keys = quantizers_sd_keys | set(model.state_dict().keys())
assert set(state_dict.keys()) == sd_all_keys, (f'Passed quantized architecture, but following keys are missing in '
f'checkpoint: {list(sd_all_keys - set(state_dict.keys()))}')
def main(args, model_args, model_arch):
quant_arch = args.arch in ['efficientnet-quant-b0', 'efficientnet-quant-b4']
if quant_arch:
pytorch_quantization.nn.modules.tensor_quantizer.TensorQuantizer.use_fb_fake_quant = True
model = model_arch(**model_args.__dict__)
if quant_arch and model_args.pretrained_from_file is not None:
check_quant_weight_correctness(model_args.pretrained_from_file, model)
image_size = args.image_size if args.image_size is not None else model.arch.default_image_size
train_loader = get_dataloader(image_size, args.batch_size, model_args.num_classes)
inputs = prepare_inputs(train_loader, args.device)
final_model_path = args.output if args.output is not None else final_name(model_args.pretrained_from_file)
model.to(args.device)
model.eval()
with torch.no_grad():
torch.onnx.export(model,
inputs[0],
final_model_path,
verbose=True,
opset_version=13,
enable_onnx_checker=True,
do_constant_folding=True)
if __name__ == '__main__':
epilog = [
"Based on the architecture picked by --arch flag, you may use the following options:\n"
]
for model, ep in available_models().items():
model_help = "\n".join(ep.parser().format_help().split("\n")[2:])
epilog.append(model_help)
parser = argparse.ArgumentParser(
description="PyTorch ImageNet Training",
epilog="\n".join(epilog),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser = parse_args(parser)
args, rest = parser.parse_known_args()
model_arch = available_models()[args.arch]
model_args, rest = model_arch.parser().parse_known_args(rest)
assert len(rest) == 0, f"Unknown args passed: {rest}"
main(args, model_args, model_arch)
|
PyTorch/SpeechSynthesis/Tacotron2/tacotron2 | tacotron2 | data_function | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.utils.data
import tacotron2_common.layers as layers
from tacotron2_common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
from tacotron2.text import text_to_sequence
class TextMelLoader(torch.utils.data.Dataset):
"""
1) loads audio,text pairs
2) normalizes text and converts them to sequences of one-hot vectors
3) computes mel-spectrograms from audio files.
"""
def __init__(self, dataset_path, audiopaths_and_text, args):
self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)
self.text_cleaners = args.text_cleaners
self.max_wav_value = args.max_wav_value
self.sampling_rate = args.sampling_rate
self.load_mel_from_disk = args.load_mel_from_disk
self.stft = layers.TacotronSTFT(
args.filter_length, args.hop_length, args.win_length,
args.n_mel_channels, args.sampling_rate, args.mel_fmin,
args.mel_fmax)
def get_mel_text_pair(self, audiopath_and_text):
# separate filename and text
audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
len_text = len(text)
text = self.get_text(text)
mel = self.get_mel(audiopath)
return (text, mel, len_text)
def get_mel(self, filename):
if not self.load_mel_from_disk:
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
                raise ValueError("{}: {} SR doesn't match target {} SR".format(
                    filename, sampling_rate, self.stft.sampling_rate))
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
else:
melspec = torch.load(filename)
assert melspec.size(0) == self.stft.n_mel_channels, (
'Mel dimension mismatch: given {}, expected {}'.format(
melspec.size(0), self.stft.n_mel_channels))
return melspec
def get_text(self, text):
text_norm = torch.IntTensor(text_to_sequence(text, self.text_cleaners))
return text_norm
def __getitem__(self, index):
return self.get_mel_text_pair(self.audiopaths_and_text[index])
def __len__(self):
return len(self.audiopaths_and_text)
class TextMelCollate():
""" Zero-pads model inputs and targets based on number of frames per setep
"""
def __init__(self, n_frames_per_step):
self.n_frames_per_step = n_frames_per_step
def __call__(self, batch):
"""Collate's training batch from normalized text and mel-spectrogram
PARAMS
------
batch: [text_normalized, mel_normalized]
"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
if max_target_len % self.n_frames_per_step != 0:
max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
assert max_target_len % self.n_frames_per_step == 0
# include mel padded and gate padded
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
gate_padded[i, mel.size(1)-1:] = 1
output_lengths[i] = mel.size(1)
# count number of items - characters in text
len_x = [x[2] for x in batch]
len_x = torch.Tensor(len_x)
return text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths, len_x
def batch_to_gpu(batch):
text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths, len_x = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
x = (text_padded, input_lengths, mel_padded, max_len, output_lengths)
y = (mel_padded, gate_padded)
len_x = torch.sum(output_lengths)
return (x, y, len_x)
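# Minimal usage sketch (added for illustration; random tensors stand in for real
# text ids and mel-spectrograms produced by TextMelLoader):
def _example_collate():
    collate = TextMelCollate(n_frames_per_step=1)
    batch = [(torch.randint(1, 10, (5,)), torch.rand(80, 30), 5),
             (torch.randint(1, 10, (7,)), torch.rand(80, 20), 7)]
    text_padded, _, mel_padded, _, _, _ = collate(batch)
    assert text_padded.shape == (2, 7) and mel_padded.shape == (2, 80, 30)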
|
TensorFlow2/Classification/ConvNets/model/layers | layers | activations | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Customized Swish activation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import math
import tensorflow as tf
__all__ = ['simple_swish', 'hard_swish', 'identity', 'gelu', 'get_activation']
@tf.keras.utils.register_keras_serializable(package='Text')
def simple_swish(features):
"""Computes the Swish activation function.
The tf.nn.swish operation uses a custom gradient to reduce memory usage.
Since saving custom gradients in SavedModel is currently not supported, and
one would not be able to use an exported TF-Hub module for fine-tuning, we
  provide this wrapper that allows selecting between the native TensorFlow swish
  operation and a customized operation that uses the default TensorFlow gradient
  computation.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return features * tf.nn.sigmoid(features)
@tf.keras.utils.register_keras_serializable(package='Text')
def hard_swish(features):
"""Computes a hard version of the swish function.
This operation can be used to reduce computational cost and improve
quantization for edge devices.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return features * tf.nn.relu6(features + tf.constant(3.)) * (1. / 6.)
@tf.keras.utils.register_keras_serializable(package='Text')
def identity(features):
"""Computes the identity function.
Useful for helping in quantization.
Args:
features: A `Tensor` representing preactivation values.
Returns:
The activation value.
"""
features = tf.convert_to_tensor(features)
return tf.identity(features)
@tf.keras.utils.register_keras_serializable(package='Text')
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(math.sqrt(2 / math.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
# TODO(hongkuny): consider moving custom string-map lookup to keras api.
def get_activation(identifier):
"""Maps a identifier to a Python function, e.g., "relu" => `tf.nn.relu`.
It checks string first and if it is one of customized activation not in TF,
the corresponding activation will be returned. For non-customized activation
names and callable identifiers, always fallback to tf.keras.activations.get.
Args:
identifier: String name of the activation function or callable.
Returns:
A Python function corresponding to the activation function.
"""
if isinstance(identifier, six.string_types):
name_to_fn = {
"gelu": gelu,
"simple_swish": simple_swish,
"hard_swish": hard_swish,
"identity": identity,
}
identifier = str(identifier).lower()
if identifier in name_to_fn:
return tf.keras.activations.get(name_to_fn[identifier])
return tf.keras.activations.get(identifier)
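if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): resolve an
    # activation by name and use it inside a Keras layer.
    act = get_activation('simple_swish')
    layer = tf.keras.layers.Dense(16, activation=act)
    print(layer(tf.ones([1, 8])).shape)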
|
PyTorch/Translation/Transformer | Transformer | inference | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import time
from collections import namedtuple
import numpy as np
import torch
from torch.serialization import default_restore_location
from fairseq import data, options, tokenizer, utils, log_helper
from fairseq.sequence_generator import SequenceGenerator
from fairseq.meters import StopwatchMeter
from fairseq.models.transformer import TransformerModel
import dllogger
from apply_bpe import BPE
Batch = namedtuple('Batch', 'srcs tokens lengths')
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')
def load_ensemble_for_inference(filenames):
"""Load an ensemble of models for inference.
Model architectures are rebuilt from the args stored in each checkpoint and
the saved weights are then loaded into them.
"""
# load model architectures and weights
states = []
for filename in filenames:
if not os.path.exists(filename):
raise IOError('Model file not found: {}'.format(filename))
state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu'))
states.append(state)
ensemble = []
for state in states:
args = state['args']
# build model for ensemble
model = TransformerModel.build_model(args)
model.load_state_dict(state['model'], strict=True)
ensemble.append(model)
src_dict = states[0]['extra_state']['src_dict']
tgt_dict = states[0]['extra_state']['tgt_dict']
return ensemble, args, src_dict, tgt_dict
def buffered_read(buffer_size, data_descriptor):
buffer = []
for src_str in data_descriptor:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if buffer:
yield buffer
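# Illustrative behaviour of buffered_read (added note, not original code):
# with buffer_size=2 and an input yielding the lines "a", "b", "c", the
# generator first yields ["a", "b"] and then the remainder ["c"].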
def make_batches(lines, args, src_dict, max_positions, bpe=None):
tokens = [
tokenizer.Tokenizer.tokenize(
src_str,
src_dict,
tokenize=tokenizer.tokenize_en,
add_if_not_exist=False,
bpe=bpe
).long()
for src_str in lines
]
lengths = np.array([t.numel() for t in tokens])
itr = data.EpochBatchIterator(
dataset=data.LanguagePairDataset(tokens, lengths, src_dict),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
srcs=[lines[i] for i in batch['id']],
tokens=batch['net_input']['src_tokens'],
lengths=batch['net_input']['src_lengths'],
), batch['id']
def setup_logger(args):
if not args.no_dllogger:
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=1, filename=args.stat_file)])
for k, v in vars(args).items():
dllogger.log(step='PARAMETER', data={k:v}, verbosity=0)
container_setup_info = log_helper.get_framework_env_vars()
dllogger.log(step='PARAMETER', data=container_setup_info, verbosity=0)
dllogger.metadata('throughput',
{'unit':'tokens/s', 'format':':.3f', 'GOAL':'MAXIMIZE', 'STAGE':'INFER'})
else:
dllogger.init(backends=[])
def main(args):
setup_logger(args)
args.interactive = sys.stdin.isatty() and not args.file  # Just makes the code more understandable
if args.file:
data_descriptor = open(args.file, 'r')
else:
data_descriptor = sys.stdin
if args.interactive:
args.buffer_size = 1
if args.max_tokens is None and args.max_sentences is None:
args.max_sentences = 1
if args.buffer_size > 50000:
print("WARNING: To prevent memory exhaustion buffer size is set to 50000", file=sys.stderr)
args.buffer_size = 50000
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
'--max-sentences/--batch-size cannot be larger than --buffer-size'
print(args, file=sys.stderr)
use_cuda = torch.cuda.is_available() and not args.cpu
torch.cuda.synchronize()
processing_start = time.time()
# Load ensemble
print('| loading model(s) from {}'.format(args.path), file=sys.stderr)
model_paths = args.path.split(':')
models, model_args, src_dict, tgt_dict = load_ensemble_for_inference(model_paths)
if args.fp16:
for model in models:
model.half()
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(need_attn=args.print_alignment)
# Initialize generator
translator = SequenceGenerator(
models,
tgt_dict.get_metadata(),
maxlen=args.max_target_positions,
beam_size=args.beam,
stop_early=(not args.no_early_stop),
normalize_scores=(not args.unnormalized),
len_penalty=args.lenpen,
unk_penalty=args.unkpen,
sampling=args.sampling,
sampling_topk=args.sampling_topk,
minlen=args.min_len,
sampling_temperature=args.sampling_temperature
)
if use_cuda:
translator.cuda()
# Load BPE codes file
bpe = None
if args.bpe_codes:
codes = open(args.bpe_codes, 'r')
bpe = BPE(codes)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
def make_result(src_str, hypos):
result = Translation(
src_str=src_str,
hypos=[],
pos_scores=[],
alignments=[],
)
# Process top predictions
for hypo in hypos[:min(len(hypos), args.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
hypo_str = tokenizer.Tokenizer.detokenize(hypo_str, 'de').strip()
result.hypos.append((hypo['score'], hypo_str))
result.pos_scores.append('P\t' + ' '.join(f'{x:.4f}' for x in hypo['positional_scores'].tolist()))
result.alignments.append('A\t' + ' '.join(str(utils.item(x)) for x in alignment)
if args.print_alignment else None
)
return result
gen_timer = StopwatchMeter()
def process_batch(batch):
tokens = batch.tokens
lengths = batch.lengths
if use_cuda:
tokens = tokens.cuda()
lengths = lengths.cuda()
torch.cuda.synchronize()
translation_start = time.time()
gen_timer.start()
translations = translator.generate(
tokens,
lengths,
maxlen=int(args.max_len_a * tokens.size(1) + args.max_len_b),
)
gen_timer.stop(sum(len(h[0]['tokens']) for h in translations))
torch.cuda.synchronize()
dllogger.log(step='infer', data={'latency': time.time() - translation_start})
return [make_result(batch.srcs[i], t) for i, t in enumerate(translations)]
if args.interactive:
print('| Type the input sentence and press return:')
for inputs in buffered_read(args.buffer_size, data_descriptor):
indices = []
results = []
for batch, batch_indices in make_batches(inputs, args, src_dict, args.max_positions, bpe):
indices.extend(batch_indices)
results += process_batch(batch)
for i in np.argsort(indices):
result = results[i]
print(result.src_str, file=sys.stderr)
for hypo, pos_scores, align in zip(result.hypos, result.pos_scores, result.alignments):
print(f'Score {hypo[0]}', file=sys.stderr)
print(hypo[1])
if align is not None:
print(align, file=sys.stderr)
if args.file:
data_descriptor.close()
torch.cuda.synchronize()
log_dict = {
'throughput': 1./gen_timer.avg,
'latency_avg': sum(gen_timer.intervals)/len(gen_timer.intervals),
'latency_p90': gen_timer.p(90),
'latency_p95': gen_timer.p(95),
'latency_p99': gen_timer.p(99),
'total_inference_time': gen_timer.sum,
'total_run_time': time.time() - processing_start,
}
print('Translation time: {} s'.format(log_dict['total_inference_time']),
file=sys.stderr)
print('Model throughput (beam {}): {} tokens/s'.format(args.beam, log_dict['throughput']),
file=sys.stderr)
print('Latency:\n\tAverage {:.3f}s\n\tp90 {:.3f}s\n\tp95 {:.3f}s\n\tp99 {:.3f}s'.format(
log_dict['latency_avg'], log_dict['latency_p90'], log_dict['latency_p95'], log_dict['latency_p99']),
file=sys.stderr)
print('End to end time: {} s'.format(log_dict['total_run_time']), file=sys.stderr)
dllogger.log(step=(), data=log_dict)
if __name__ == '__main__':
parser = options.get_inference_parser()
parser.add_argument('--no-dllogger', action='store_true')
ARGS = options.parse_args_and_arch(parser)
main(ARGS)
|
TensorFlow/Detection/SSD/models/research/object_detection/utils | utils | np_box_list | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Numpy BoxList classes and functions."""
import numpy as np
class BoxList(object):
"""Box collection.
BoxList represents a list of bounding boxes as a numpy array, where each
bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes within a
given list correspond to a single image.
Optionally, users can add additional related fields (such as
objectness/classification scores).
"""
def __init__(self, data):
"""Constructs box collection.
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Raises:
ValueError: if bbox data is not a numpy array
ValueError: if invalid dimensions for bbox data
"""
if not isinstance(data, np.ndarray):
raise ValueError('data must be a numpy array.')
if len(data.shape) != 2 or data.shape[1] != 4:
raise ValueError('Invalid dimensions for box data.')
if data.dtype != np.float32 and data.dtype != np.float64:
raise ValueError('Invalid data type for box data: float is required.')
if not self._is_valid_boxes(data):
raise ValueError('Invalid box data. data must be a numpy array of '
'N*[y_min, x_min, y_max, x_max]')
self.data = {'boxes': data}
def num_boxes(self):
"""Return number of boxes held in collections."""
return self.data['boxes'].shape[0]
def get_extra_fields(self):
"""Return all non-box fields."""
return [k for k in self.data.keys() if k != 'boxes']
def has_field(self, field):
return field in self.data
def add_field(self, field, field_data):
"""Add data to a specified field.
Args:
field: a string parameter used to specify a related field to be accessed.
field_data: a numpy array of [N, ...] representing the data associated
with the field.
Raises:
ValueError: if the field already exists or the dimension of the field
data does not match the number of boxes.
"""
if self.has_field(field):
raise ValueError('Field ' + field + ' already exists')
if len(field_data.shape) < 1 or field_data.shape[0] != self.num_boxes():
raise ValueError('Invalid dimensions for field data')
self.data[field] = field_data
def get(self):
"""Convenience function for accesssing box coordinates.
Returns:
a numpy array of shape [N, 4] representing box corners
"""
return self.get_field('boxes')
def get_field(self, field):
"""Accesses data associated with the specified field in the box collection.
Args:
field: a string parameter used to specify a related field to be accessed.
Returns:
a numpy 1-d array representing data of an associated field
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field {} does not exist'.format(field))
return self.data[field]
def get_coordinates(self):
"""Get corner coordinates of boxes.
Returns:
a list of 4 1-d numpy arrays [y_min, x_min, y_max, x_max]
"""
box_coordinates = self.get()
y_min = box_coordinates[:, 0]
x_min = box_coordinates[:, 1]
y_max = box_coordinates[:, 2]
x_max = box_coordinates[:, 3]
return [y_min, x_min, y_max, x_max]
def _is_valid_boxes(self, data):
"""Check whether data fullfills the format of N*[ymin, xmin, ymax, xmin].
Args:
data: a numpy array of shape [N, 4] representing box coordinates
Returns:
a boolean indicating whether all ymax of boxes are equal or greater than
ymin, and all xmax of boxes are equal or greater than xmin.
"""
if data.shape[0] > 0:
for i in range(data.shape[0]):
if data[i, 0] > data[i, 2] or data[i, 1] > data[i, 3]:
return False
return True
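if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): build a
    # BoxList from made-up coordinates and attach a per-box "scores" field.
    example_boxes = np.array([[0.1, 0.1, 0.5, 0.5],
                              [0.2, 0.3, 0.9, 0.8]], dtype=np.float32)
    example_boxlist = BoxList(example_boxes)
    example_boxlist.add_field('scores', np.array([0.9, 0.75], dtype=np.float32))
    print(example_boxlist.num_boxes(), example_boxlist.get_extra_fields())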
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading | dataloading | dataloader_benchmark | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel (tgrel@nvidia.com)
from . import dataloader
import argparse
import os
import time
import tensorflow as tf
import horovod.tensorflow as hvd
from .feature_spec import FeatureSpec
def compute_bytes_per_batch(batch):
bytes_per_dtype = dict(
float16=2,
int32=4,
int8=1
)
(numerical, categorical), label = batch
numerical_bytes = numerical.shape[0] * numerical.shape[1] * bytes_per_dtype[numerical.dtype.name]
categorical_bytes = []
for c in categorical:
if hasattr(c, 'flat_values'):
# ragged tensor
values = c.flat_values
values_bytes = values.shape[0] * bytes_per_dtype[values.dtype.name]
categorical_bytes.append(values_bytes)
else:
# dense tensor
num_bytes = c.shape[0] * c.shape[1] * bytes_per_dtype[c.dtype.name]
categorical_bytes.append(num_bytes)
categorical_bytes = sum(categorical_bytes)
label_bytes = label.shape[0] * bytes_per_dtype[label.dtype.name]
return numerical_bytes + categorical_bytes + label_bytes
def main():
parser = argparse.ArgumentParser(description="Benchmark a dataloader")
parser.add_argument('--dataset_path', default='synthetic_dataset', type=str,
help='Path to the destination directory')
parser.add_argument('--dataset_type', type=str, choices=['tf_raw', 'split_tfrecords'])
parser.add_argument('--batch_size', default=65536, type=int, help='Batch size')
parser.add_argument('--xla', default=False, action='store_true', help='Enable XLA')
parser.add_argument('--amp', default=False, action='store_true', help='Enable automatic mixed precision')
parser.add_argument('--run_eagerly', default=False, action='store_true', help='Run TensorFlow functions eagerly')
parser.add_argument('--tfdata_debug', default=False, action='store_true', help='Enable tf.data debug mode')
parser.add_argument('--feature_spec', type=str, default='feature_spec.yaml',
help='Filename of the feature spec describing the dataset')
parser.add_argument('--max_batches', type=int, default=100,
help='Stop after this many batches, even if there is still some data to be read')
parser.add_argument('--warmup_steps', type=int, default=5,
help='Number of warmup steps that are not benchmarked')
parser.add_argument('--sleep', type=int, default=0,
help='Sleep for this many seconds after creating the dataloader. For debug only.')
args = parser.parse_args()
args.synthetic_dataset_use_feature_spec = False
args.valid_batch_size = args.batch_size
if args.dataset_type == 'nvt' and not args.run_eagerly:
raise ValueError('NVT dataloader does not support graph mode. Please specify --run_eagerly to use it.')
if args.xla:
os.environ['TF_XLA_FLAGS'] = '--tf_xla_auto_jit=fusible'
hvd.init()
gpus = tf.config.experimental.list_physical_devices('GPU')
if args.dataset_type != 'nvt':
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
visible_gpus = []
if gpus:
visible_gpus = gpus[hvd.local_rank()]
tf.config.experimental.set_visible_devices(visible_gpus, 'GPU')
if args.amp:
policy = tf.keras.mixed_precision.Policy("mixed_float16")
tf.keras.mixed_precision.set_global_policy(policy)
tf.config.run_functions_eagerly(args.run_eagerly)
if args.tfdata_debug:
tf.data.experimental.enable_debug_mode()
fspec_path = os.path.join(args.dataset_path, args.feature_spec)
feature_spec = FeatureSpec.from_yaml(fspec_path)
table_ids = list(range(len(feature_spec.get_categorical_sizes())))
table_ids = table_ids[hvd.rank()::hvd.size()]
print('Creating the pipelines')
train_pipeline, validation_pipeline = dataloader.create_input_pipelines(args, table_ids=table_ids,
rank=hvd.rank(),
world_size=hvd.size())
print('Benchmarking...')
it = iter(train_pipeline.op())
reduce_input = tf.convert_to_tensor([0], dtype=tf.float32, name='reduce_input')
@tf.function
def step():
device = '/GPU:0'
with tf.device(device):
b = next(it)
_ = hvd.allreduce(reduce_input, name='barrier')
return
for i in range(args.warmup_steps):
print('warmup step:', i)
l = step()
rank = hvd.rank()
if args.sleep != 0:
print('sleeping...')
time.sleep(args.sleep)
begin = time.time()
current = begin
for idx in range(args.max_batches):
l = step()
new = time.time()
if rank == 0:
print(f'batch: {idx}, step time: {new - current:.3f}')
current = new
end = time.time()
print('Benchmark done')
num_batches = (idx + 1)
elapsed = (end - begin)
batches_per_second = num_batches / elapsed
samples_per_second = batches_per_second * args.batch_size
if rank == 0:
print(f'Batches per second: {batches_per_second:.2e}')
print(f'Samples per second: {samples_per_second:.2e}')
if __name__ == '__main__':
main()
|
Kaldi/SpeechRecognition/scripts | scripts | nvidia_kaldi_triton_entrypoint | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
if [ -d "/mnt/model-repo/kaldi_online" ]; then
ln -s /mnt/model-repo/kaldi_online/config.pbtxt /workspace/model-repo/kaldi_online/
fi
/opt/tritonserver/nvidia_entrypoint.sh $@
|
PyTorch/SpeechRecognition/wav2vec2 | wav2vec2 | inference | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import math
import os
import random
import time
import warnings
from argparse import ArgumentParser
from heapq import nlargest
from itertools import chain, repeat
from pathlib import Path
from tqdm import tqdm
import dllogger
import numpy as np
import torch
import torch.distributed as distrib
from dllogger import JSONStreamBackend, StdOutBackend, Verbosity
import wav2vec2.arg_parser
import wav2vec2.utils
import common.fairseq.utils as utils
from common.fairseq.data import Dictionary
from common.helpers import (gather_predictions, gather_transcripts,
load_wrapped_state, process_evaluation_epoch)
from common.tb_dllogger import stdout_metric_format, unique_log_fpath
from common.utils import print_once
from torch.utils.data import DataLoader, DistributedSampler
from wav2vec2.logging import init_infer_metadata
def durs_to_percentiles(durations, ratios):
durations = np.asarray(durations) * 1000 # in ms
latency = durations
latency = latency[5:]
mean_latency = np.mean(latency)
latency_worst = nlargest(math.ceil((1 - min(ratios)) * len(latency)),
latency)
latency_ranges = get_percentile(ratios, latency_worst, len(latency))
latency_ranges[0.5] = mean_latency
return latency_ranges
def get_percentile(ratios, arr, nsamples):
res = {}
for a in ratios:
idx = max(int(nsamples * (1 - a)), 0)
res[a] = arr[idx]
return res
def fp_convert_batch(batch, precision):
dt = {'fp32': torch.float32, 'fp16': torch.half,
'bf16': torch.bfloat16}[precision]
def maybe_cast(t):
if t.dtype is torch.float32:
return t.to(dtype=dt)
return t
return utils.apply_to_sample(maybe_cast, batch)
def main():
parser = ArgumentParser(description='wav2vec2.0 inference')
wav2vec2.arg_parser.populate_infer(parser)
args = parser.parse_args()
ckpt = torch.load(args.w2v_path, map_location=torch.device("cpu"))
train_args = wav2vec2.utils.get_ckpt_args(ckpt)
is_nv_ckpt = "mode" in train_args
if is_nv_ckpt:
print("Loaded a model trained with NVIDIA DLE")
args.fp32_pos_conv = train_args.get("fp32_pos_conv",
args.fp16 or args.bf16)
args.fp32_conv_norms = train_args.get("fp32_conv_norms", args.fp16)
else:
args.fp32_pos_conv = args.fp16
args.fp32_conv_norms = args.fp16
args.fp32_pos_conv = True
args.fp32_conv_norms = True
log_fpath = args.log_file or str(Path(args.output_dir, 'nvlog_infer.json'))
dllogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, metric_format=stdout_metric_format)
])
[dllogger.log("PARAMETER", {k: v}) for k, v in vars(args).items()]
init_infer_metadata()
if ((train_args.get("fp16", False) or train_args.get("amp", False))
and args.bf16):
warnings.warn('Using FP16 ckpts in BF16 precision.')
if train_args.get("bf16", False) and args.fp16:
warnings.warn('Using BF16 ckpts in FP16 precision.')
# load output labels - either from a file, or stored inside an nv ckpt
assert args.labels_path is not None or is_nv_ckpt
if args.labels_path is None:
f = io.StringIO(ckpt["output_labels"])
else:
f = open(args.labels_path)
target_dictionary = Dictionary.load(f)
f.close()
w2v_path_for_args = args.w2v_path_for_args or args.w2v_path
wav2vec2.utils.update_args_for_finetuning(args, w2v_path_for_args)
# "default" GroupNorm might leak padding
args.masked_feature_extractor = True
if args.torchscript:
from common.fairseq.modules import layer_norm
layer_norm.TORCHSCRIPT = True
model, *_ = wav2vec2.utils.build_model(args, "infer", target_dictionary)
load_wrapped_state(model, ckpt["model"])
model.w2v_encoder.w2v_model.remove_conv_wn()
model.w2v_encoder.w2v_model.feature_extractor.forward = \
model.w2v_encoder.w2v_model.feature_extractor.masked_forward
model.w2v_encoder.forward = model.w2v_encoder.infer
model.w2v_encoder.w2v_model.forward = model.w2v_encoder.w2v_model.infer
if args.cpu:
device = torch.device('cpu')
else:
assert torch.cuda.is_available()
device = torch.device('cuda')
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if args.seed is not None:
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
# set up distributed training
multi_gpu = not args.cpu and int(os.environ.get('WORLD_SIZE', 1)) > 1
if multi_gpu:
torch.cuda.set_device(args.local_rank)
distrib.init_process_group(backend='nccl', init_method='env://')
print_once(f'Inference with {distrib.get_world_size()} GPUs')
measure_perf = args.steps > 0
# Compliance with fairseq dataloader
assert args.batch_size is not None
args.min_sample_size = None
args.max_sample_size = None
if args.transcribe_wav or args.transcribe_filelist:
assert args.max_duration is None and not measure_perf
assert not (args.transcribe_wav and args.transcribe_filelist)
assert args.labels is None, "Labels won't be used during transcribing"
assert not multi_gpu, (
"multigpu is currently supported only for WER/perf measurements")
if args.transcribe_wav:
dataset = wav2vec2.utils.single_audio_dataset(args.transcribe_wav,
args)
else:
dataset = wav2vec2.utils.load_dataset(args.transcribe_filelist,
args, target_dictionary)
data_loader = DataLoader(
dataset=dataset,
batch_size=args.batch_size,
shuffle=False,
collate_fn=dataset.collater,
num_workers=args.num_workers,
pin_memory=True,
persistent_workers=args.num_workers > 0,
drop_last=False,
)
else: # compute WER or measure perf
assert args.labels is not None or measure_perf
dataset = wav2vec2.utils.load_dataset(args.valid_subset, args,
target_dictionary,
with_labels=True)
sampler = DistributedSampler(
dataset,
shuffle=False,
drop_last=False
) if multi_gpu else None
data_loader = DataLoader(
dataset=dataset,
batch_size=args.batch_size,
sampler=sampler,
shuffle=False,
collate_fn=dataset.collater,
num_workers=args.num_workers,
pin_memory=True,
persistent_workers=args.num_workers > 0,
drop_last=(True if measure_perf else False),
)
model.to(device)
model.eval()
assert args.amp == args.fp16, 'During inference these are equivalent'
if args.fp16:
model = model.half()
if args.bf16:
model = model.to(dtype=torch.bfloat16)
if (args.fp16 or args.bf16) and args.fp32_pos_conv:
model.w2v_encoder.w2v_model.encoder.pos_conv.to(dtype=torch.float32)
if args.torchscript:
print("Attempting TorchScript export...")
model = torch.jit.script(model)
agg = {'txts': [], 'preds': [], 'logits': [], 'ids': []}
dur = {'data': [], 'dnn': [], 'data+dnn': []}
looped_loader = chain.from_iterable(repeat(data_loader))
sync = lambda: torch.cuda.synchronize() if device.type == 'cuda' else None
steps = args.steps + args.warmup_steps or len(data_loader)
desc = 'warmup' if args.warmup_steps > 0 else 'inference'
pbar = tqdm(looped_loader, initial=1, total=steps, desc=desc)
for it, batch in enumerate(pbar):
if it == args.warmup_steps:
pbar.set_description('inference')
batch = utils.move_to_cuda(batch)
sync()
t1 = time.time()
if args.fp16:
batch = fp_convert_batch(batch, 'fp16')
if args.bf16:
batch = fp_convert_batch(batch, 'bf16')
with torch.no_grad():
enc_out, padding_mask = model(batch["net_input"]["source"],
batch["net_input"]["padding_mask"])
logp = model.get_normalized_probs(enc_out,
padding_mask,
log_probs=True).contiguous()
# greedy decoding
preds = logp.argmax(dim=-1, keepdim=False).int()
sync()
t2 = time.time()
# burn-in period; wait for a new loader due to num_workers
if it >= 1 and (args.steps == 0 or it >= args.warmup_steps):
dur['data'].append(t1 - t0)
dur['dnn'].append(t2 - t1)
dur['data+dnn'].append(t2 - t0)
preds = preds.transpose(0, 1)
agg['preds'] += gather_predictions([preds],
target_dictionary,
blank_id=0)
agg['logits'].append(logp)
if 'target' in batch:
agg['txts'] += gather_transcripts([batch['target']],
[batch['target_lengths']],
target_dictionary)
if multi_gpu:
# ids are needed to remove duplicates in multi_gpu inference
agg['ids'] += batch['id'].tolist()
if it + 1 == steps:
break
sync()
t0 = time.time()
tdict = target_dictionary
agg['preds'] = [pred.replace(tdict[tdict.nspecial], ' ')
for pred in agg['preds']]
agg['txts'] = [txt.replace(tdict[tdict.nspecial], ' ')
for txt in agg['txts']]
# communicate the results
if args.transcribe_wav or args.transcribe_filelist:
for idx, p in enumerate(agg['preds']):
print_once(f'Prediction {idx + 1: >3}: {p}')
elif args.valid_subset and not measure_perf:
wer, _ = process_evaluation_epoch(agg)
if not multi_gpu or distrib.get_rank() == 0:
dllogger.log(step=(), data={'eval_wer': 100 * wer})
if args.save_predictions and (not multi_gpu or distrib.get_rank() == 0):
with open(args.save_predictions, 'w') as f:
f.write('\n'.join(agg['preds']))
if args.save_logits and (not multi_gpu or distrib.get_rank() == 0):
logits = torch.cat(agg['logits'], dim=0).cpu()
torch.save(logits, args.save_logits)
# report timings
if len(dur['data']) >= 20 and (not multi_gpu or distrib.get_rank() == 0):
ratios = [0.9, 0.95, 0.99]
for stage in dur:
lat = durs_to_percentiles(dur[stage], ratios)
for k in [0.99, 0.95, 0.9, 0.5]:
k_ = str(k).replace('.', '_')
dllogger.log(step=(), data={f'{stage}_latency_{k_}': lat[k]})
else:
print_once('Not enough samples to measure latencies.')
if __name__ == "__main__":
main()
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2 | mrcnn_tf2 | config | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parameters used to build Mask-RCNN model."""
from argparse import Namespace
CONFIG = Namespace(**dict(
# input pre-processing parameters
image_size=(832, 1344),
augment_input_data=True,
gt_mask_size=112,
# dataset specific parameters
num_classes=91,
skip_crowd_during_training=True,
use_category=True,
# Region Proposal Network
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
rpn_batch_size_per_im=256,
rpn_fg_fraction=0.5,
rpn_min_size=0.,
# Proposal layer.
batch_size_per_im=512,
fg_fraction=0.25,
fg_thresh=0.5,
bg_thresh_hi=0.5,
bg_thresh_lo=0.,
# Faster-RCNN heads.
fast_rcnn_mlp_head_dim=1024,
bbox_reg_weights=(10., 10., 5., 5.),
# Mask-RCNN heads.
include_mask=True, # whether or not to include mask branch. # ===== Not existing in MLPerf ===== #
mrcnn_resolution=28,
# training
train_rpn_pre_nms_topn=2000,
train_rpn_post_nms_topn=1000,
train_rpn_nms_threshold=0.7,
# evaluation
test_detections_per_image=100,
test_nms=0.5,
test_rpn_pre_nms_topn=1000,
test_rpn_post_nms_topn=1000,
test_rpn_nms_thresh=0.7,
# model architecture
min_level=2,
max_level=6,
num_scales=1,
aspect_ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
anchor_scale=8.0,
# localization loss
rpn_box_loss_weight=1.0,
fast_rcnn_box_loss_weight=1.0,
mrcnn_weight_loss_mask=1.0,
# other
checkpoint_name_format='nvidia_mrcnn_tf2.ckpt'
))
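# Illustrative access pattern (added note, not original code; the import path
# assumes this module lives at mrcnn_tf2/config.py):
#   from mrcnn_tf2.config import CONFIG
#   print(CONFIG.image_size, CONFIG.num_classes, CONFIG.include_mask)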
|
PyTorch/Classification/GPUNet/configs/batch1/GV100 | GV100 | 0.85ms | [
{
"layer_type": "data",
"img_resolution": 288,
"distill": false
},
{
"layer_type": "head",
"num_in_channels": 3,
"num_out_channels": 24
},
{
"layer_type": "conv",
"num_in_channels": 24,
"num_out_channels": 24,
"stride": 1,
"kernel_size": 3,
"act": "relu",
"stage": 1
},
{
"layer_type": "fused_irb",
"num_in_channels": 24,
"num_out_channels": 64,
"stride": 2,
"expansion": 4,
"kernel_size": 3,
"act": "swish",
"use_se": false,
"stage": 2
},
{
"layer_type": "fused_irb",
"num_in_channels": 64,
"num_out_channels": 64,
"stride": 1,
"expansion": 4,
"kernel_size": 3,
"act": "swish",
"use_se": false,
"stage": 2
},
{
"layer_type": "fused_irb",
"num_in_channels": 64,
"num_out_channels": 96,
"stride": 2,
"expansion": 4,
"kernel_size": 3,
"act": "swish",
"use_se": false,
"stage": 3
},
{
"layer_type": "fused_irb",
"num_in_channels": 96,
"num_out_channels": 96,
"stride": 1,
"expansion": 4,
"kernel_size": 3,
"act": "swish",
"use_se": false,
"stage": 3
},
{
"layer_type": "irb",
"num_in_channels": 96,
"num_out_channels": 160,
"stride": 2,
"expansion": 2,
"kernel_size": 3,
"act": "swish",
"use_se": true,
"stage": 4
},
{
"layer_type": "irb",
"num_in_channels": 160,
"num_out_channels": 288,
"stride": 1,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": false,
"stage": 5
},
{
"layer_type": "irb",
"num_in_channels": 288,
"num_out_channels": 288,
"stride": 1,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": false,
"stage": 5
},
{
"layer_type": "irb",
"num_in_channels": 288,
"num_out_channels": 288,
"stride": 1,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": false,
"stage": 5
},
{
"layer_type": "irb",
"num_in_channels": 288,
"num_out_channels": 288,
"stride": 1,
"expansion": 5,
"kernel_size": 3,
"act": "relu",
"use_se": false,
"stage": 5
},
{
"layer_type": "irb",
"num_in_channels": 288,
"num_out_channels": 448,
"stride": 2,
"expansion": 4,
"kernel_size": 3,
"act": "relu",
"use_se": true,
"stage": 6
},
{
"layer_type": "irb",
"num_in_channels": 448,
"num_out_channels": 448,
"stride": 1,
"expansion": 4,
"kernel_size": 3,
"act": "relu",
"use_se": true,
"stage": 6
},
{
"layer_type": "irb",
"num_in_channels": 448,
"num_out_channels": 448,
"stride": 1,
"expansion": 4,
"kernel_size": 3,
"act": "relu",
"use_se": true,
"stage": 6
},
{
"layer_type": "irb",
"num_in_channels": 448,
"num_out_channels": 448,
"stride": 1,
"expansion": 4,
"kernel_size": 3,
"act": "relu",
"use_se": true,
"stage": 6
},
{
"layer_type": "tail",
"num_in_channels": 448,
"num_out_channels": 1280,
"num_classes": 1000
}
] |
PyTorch/Classification/ConvNets/resnext101-32x4d | resnext101-32x4d | README | # ResNeXt101-32x4d For PyTorch
This repository provides a script and recipe to train the ResNeXt101-32x4d model to
achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA.
## Table Of Contents
* [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Optimizer](#optimizer)
* [Data augmentation](#data-augmentation)
* [DALI](#dali)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Setup](#setup)
* [Requirements](#requirements)
* [Quick Start Guide](#quick-start-guide)
* [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Command-line options](#command-line-options)
* [Dataset guidelines](#dataset-guidelines)
* [Training process](#training-process)
* [Inference process](#inference-process)
* [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Example plots](#example-plots)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Training performance: NVIDIA DGX-1 16GB (8x V100 16GB)](#training-performance-nvidia-dgx-1-16gb-8x-v100-16gb)
* [Training performance: NVIDIA DGX-1 32GB (8x V100 32GB)](#training-performance-nvidia-dgx-1-32gb-8x-v100-32gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX-1 16GB (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
* [Inference performance: NVIDIA T4](#inference-performance-nvidia-t4)
* [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
The ResNeXt101-32x4d is a model introduced in the [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/pdf/1611.05431.pdf) paper.
It is based on the regular ResNet model, replacing the 3x3 convolutions inside the bottleneck block with 3x3 grouped convolutions.
This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results up to 3x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
We use [NHWC data layout](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) when training using Mixed Precision.
### Model architecture
![ResNextArch](./img/ResNeXtArch.png)
_Image source: [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/pdf/1611.05431.pdf)_
The image shows the difference between the ResNet bottleneck block and the ResNeXt bottleneck block.
The ResNeXt101-32x4d model's cardinality equals 32 and its bottleneck width equals 4.
### Default configuration
The following sections highlight the default configurations for the ResNeXt101-32x4d model.
#### Optimizer
This model uses SGD with momentum optimizer with the following hyperparameters:
* Momentum (0.875)
* Learning rate (LR) = 0.256 for a batch size of 256; for other batch sizes we linearly
scale the learning rate (see the example after this list).
* Learning rate schedule - we use cosine LR schedule
* For bigger batch sizes (512 and up) we use linear warmup of the learning rate
during the first couple of epochs
according to [Training ImageNet in 1 hour](https://arxiv.org/abs/1706.02677).
Warmup length depends on the total training length.
* Weight decay (WD)= 6.103515625e-05 (1/16384).
* We do not apply WD on Batch Norm trainable parameters (gamma/bias)
* Label smoothing = 0.1
* We train for:
* 90 Epochs -> 90 epochs is a standard for ImageNet networks
* 250 Epochs -> best possible accuracy.
* For 250 epoch training we also use [MixUp regularization](https://arxiv.org/pdf/1710.09412.pdf).
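For example, the linear scaling rule above gives (an illustrative calculation, not code from this repository):
```python
lr = 0.256 * batch_size / 256  # e.g. total batch_size = 2048 -> lr = 2.048
```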
#### Data augmentation
This model uses the following data augmentation:
* For training:
* Normalization
* Random resized crop to 224x224
* Scale from 8% to 100%
* Aspect ratio from 3/4 to 4/3
* Random horizontal flip
* For inference:
* Normalization
* Scale to 256x256
* Center crop to 224x224
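The augmentation listed above roughly corresponds to the following torchvision-style pipeline. This is only a sketch: the actual pipelines are implemented by the PyTorch and DALI data backends in this repository, and the normalization statistics below are the commonly used ImageNet values, assumed here for illustration.
```python
import torchvision.transforms as T

normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

train_transform = T.Compose([
    T.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3)),
    T.RandomHorizontalFlip(),
    T.ToTensor(),
    normalize,
])

val_transform = T.Compose([
    T.Resize(256),
    T.CenterCrop(224),
    T.ToTensor(),
    normalize,
])
```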
### Feature support matrix
The following features are supported by this model:
| Feature | ResNeXt101-32x4d
|-----------------------|--------------------------
|[DALI](https://docs.nvidia.com/deeplearning/sdk/dali-release-notes/index.html) | Yes
|[APEX AMP](https://nvidia.github.io/apex/amp.html) | Yes |
#### Features
- NVIDIA DALI - DALI is a library that accelerates the data preparation pipeline. To accelerate your input pipeline, you only need to define your data loader
with the DALI library. For more information about DALI, refer to the [DALI product documentation](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/index.html).
- [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains utility libraries, such as [Automatic Mixed Precision (AMP)](https://nvidia.github.io/apex/amp.html), which require minimal network code changes to leverage Tensor Cores performance. Refer to the [Enabling mixed precision](#enabling-mixed-precision) section for more details.
### DALI
We use [NVIDIA DALI](https://github.com/NVIDIA/DALI),
which speeds up data loading when CPU becomes a bottleneck.
DALI can use CPU or GPU, and outperforms the PyTorch native dataloader.
Run training with `--data-backend dali-gpu` or `--data-backend dali-cpu` to enable DALI.
For DGXA100 and DGX1 we recommend `--data-backend dali-cpu`.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in CUDA 8 in the NVIDIA Deep Learning SDK.
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
Mixed precision is enabled in PyTorch by using the Automatic Mixed Precision (AMP), a library from [APEX](https://github.com/NVIDIA/apex) that casts variables to half-precision upon retrieval,
while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients.
In PyTorch, loss scaling can be easily applied by using scale_loss() method provided by AMP. The scaling value to be used can be [dynamic](https://nvidia.github.io/apex/fp16_utils.html#apex.fp16_utils.DynamicLossScaler) or fixed.
For an in-depth walk through on AMP, check out sample usage [here](https://github.com/NVIDIA/apex/tree/master/apex/amp#usage-and-getting-started). [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains utility libraries, such as AMP, which require minimal network code changes to leverage tensor cores performance.
To enable mixed precision, you can:
- Import AMP from APEX:
```python
from apex import amp
```
- Wrap model and optimizer in amp.initialize:
```python
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", loss_scale="dynamic")
```
- Scale loss before backpropagation:
```python
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
```
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
## Setup
The following section lists the requirements that you need to meet in order to start training the ResNeXt101-32x4d model.
### Requirements
This repository contains a Dockerfile that extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PyTorch 21.03-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch) or newer
* Supported GPUs:
* [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
* [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/)
* [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the
following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning
DGX Documentation:
* [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
* [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry)
* [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running)
For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
### 1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/PyTorch/Classification/
```
### 2. Download and preprocess the dataset.
The ResNeXt101-32x4d script operates on ImageNet 1k, a widely popular image classification dataset from the ILSVRC challenge.
PyTorch can work directly on JPEGs; therefore, no offline preprocessing/augmentation step is needed.
To train your model using mixed or TF32 precision with Tensor Cores or using FP32,
perform the following steps using the default parameters of the resnext101-32x4d model on the ImageNet dataset.
For the specifics concerning training and inference, see the [Advanced](#advanced) section.
1. [Download the images](http://image-net.org/download-images).
2. Extract the training data:
```bash
mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
tar -xvf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar
find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done
cd ..
```
3. Extract the validation data and move the images to subfolders:
```bash
mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar
wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash
```
The directory in which the `train/` and `val/` directories are placed is referred to as `<path to imagenet>` in this document.
### 3. Build the ResNeXt101-32x4d PyTorch NGC container.
```
docker build . -t nvidia_resnext101-32x4d
```
### 4. Start an interactive session in the NGC container to run training/inference.
```
nvidia-docker run --rm -it -v <path to imagenet>:/imagenet --ipc=host nvidia_resnext101-32x4d
```
### 5. Start training
To run training for a standard configuration (DGXA100/DGX1V, AMP/TF32/FP32, 90/250 Epochs),
run one of the scripts in the `./resnext101-32x4d/training` directory
called `./resnext101-32x4d/training/{AMP, TF32, FP32}/{ DGXA100, DGX1V }_resnext101-32x4d_{AMP, TF32, FP32}_{ 90, 250 }E.sh`.
Ensure ImageNet is mounted in the `/imagenet` directory.
Example:
`bash ./resnext101-32x4d/training/AMP/DGX1_resnext101-32x4d_AMP_250E.sh <path were to store checkpoints and logs>`
### 6. Start inference
You can download pretrained weights from NGC:
```bash
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/resnext101_32x4d_pyt_amp/versions/20.06.0/zip -O resnext101_32x4d_pyt_amp_20.06.0.zip
unzip resnext101_32x4d_pyt_amp_20.06.0.zip
```
To run inference on ImageNet, run:
`python ./main.py --arch resnext101-32x4d --evaluate --epochs 1 --pretrained-from-file nvidia_resnext101-32x4d_200821.pth.tar -b <batch size> <path to imagenet>`
To run inference on JPEG image using pretrained weights:
`python classify.py --arch resnext101-32x4d --pretrained-from-file nvidia_resnext101-32x4d_200821.pth.tar --precision AMP|FP32 --image <path to JPEG image>`
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
To run a non-standard configuration use:
* For 1 GPU
* FP32
`python ./main.py --arch resnext101-32x4d -c fanin --label-smoothing 0.1 <path to imagenet>`
* AMP
`python ./main.py --arch resnext101-32x4d -c fanin --label-smoothing 0.1 --amp --static-loss-scale 256 <path to imagenet>`
* For multiple GPUs
* FP32
`python ./multiproc.py --nproc_per_node 8 ./main.py --arch resnext101-32x4d -c fanin --label-smoothing 0.1 <path to imagenet>`
* AMP
`python ./multiproc.py --nproc_per_node 8 ./main.py --arch resnext101-32x4d -c fanin --label-smoothing 0.1 --amp --static-loss-scale 256 <path to imagenet>`
Use `python ./main.py -h` to obtain the list of available options in the `main.py` script.
### Command-line options:
To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example:
`python main.py -h`
```
usage: main.py [-h] [--data-backend BACKEND] [--arch ARCH]
[--model-config CONF] [-j N] [--epochs N]
[--run-epochs N] [-b N] [--optimizer-batch-size N] [--lr LR]
[--lr-schedule SCHEDULE] [--warmup E] [--label-smoothing S]
[--mixup ALPHA] [--momentum M] [--weight-decay W]
[--bn-weight-decay] [--nesterov] [--print-freq N]
[--resume PATH] [--pretrained-from-file PATH]
[--static-loss-scale STATIC_LOSS_SCALE] [--dynamic-loss-scale]
[--prof N] [--amp] [--seed SEED] [--gather-checkpoints]
[--raport-file RAPORT_FILE] [--evaluate] [--training-only]
[--no-checkpoints] [--checkpoint-filename CHECKPOINT_FILENAME]
[--workspace DIR] [--memory-format {nchw,nhwc}]
DIR
PyTorch ImageNet Training
positional arguments:
DIR path to dataset
optional arguments:
-h, --help show this help message and exit
--data-backend BACKEND
data backend: pytorch | synthetic | dali-gpu | dali-cpu
(default: dali-cpu)
--arch ARCH, -a ARCH model architecture: resnet18 | resnet34 | resnet50 |
resnet101 | resnet152 | resnext50-32x4d |
resnext101-32x4d | resnext101-32x8d |
resnext101-32x8d-basic | se-resnext101-32x4d (default:
resnet50)
--model-config CONF, -c CONF
model configs: classic | fanin | grp-fanin | grp-
fanout(default: classic)
-j N, --workers N number of data loading workers (default: 5)
--epochs N number of total epochs to run
--run-epochs N run only N epochs, used for checkpointing runs
-b N, --batch-size N mini-batch size (default: 256) per gpu
--optimizer-batch-size N
size of a total batch size, for simulating bigger
batches using gradient accumulation
--lr LR, --learning-rate LR
initial learning rate
--lr-schedule SCHEDULE
Type of LR schedule: step, linear, cosine
--warmup E number of warmup epochs
--label-smoothing S label smoothing
--mixup ALPHA mixup alpha
--momentum M momentum
--weight-decay W, --wd W
weight decay (default: 1e-4)
--bn-weight-decay use weight_decay on batch normalization learnable
parameters, (default: false)
--nesterov use nesterov momentum, (default: false)
--print-freq N, -p N print frequency (default: 10)
--resume PATH path to latest checkpoint (default: none)
--pretrained-from-file PATH
load weights from here
--static-loss-scale STATIC_LOSS_SCALE
Static loss scale, positive power of 2 values can
improve amp convergence.
--dynamic-loss-scale Use dynamic loss scaling. If supplied, this argument
supersedes --static-loss-scale.
--prof N Run only N iterations
--amp Run model AMP (automatic mixed precision) mode.
--seed SEED random seed used for numpy and pytorch
--gather-checkpoints Gather checkpoints throughout the training, without
this flag only best and last checkpoints will be
stored
--raport-file RAPORT_FILE
file in which to store JSON experiment raport
--evaluate evaluate checkpoint/model
--training-only do not evaluate
--no-checkpoints do not store any checkpoints, useful for benchmarking
--checkpoint-filename CHECKPOINT_FILENAME
--workspace DIR path to directory where checkpoints will be stored
--memory-format {nchw,nhwc}
memory layout, nchw or nhwc
```
### Dataset guidelines
To use your own dataset, divide it into directories as in the following scheme:
- Training images - `train/<class id>/<image>`
- Validation images - `val/<class id>/<image>`
If your dataset has a number of classes different from 1000, you need to pass the `--num_classes N` flag to the training script.
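For example, a custom two-class dataset could be laid out as follows (illustrative class and file names):
```
dataset/
  train/
    cat/
      img_0001.jpg
      ...
    dog/
      ...
  val/
    cat/
      ...
    dog/
      ...
```
Such a dataset would then be trained with `--num_classes 2` and the dataset path pointing at `dataset/`.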
### Training process
All the results of the training will be stored in the directory specified with the `--workspace` argument.
The script will store:
- the most recent checkpoint - `checkpoint.pth.tar` (unless the `--no-checkpoints` flag is used).
- the checkpoint with the best validation accuracy - `model_best.pth.tar` (unless the `--no-checkpoints` flag is used).
- the JSON log - in the file specified with the `--raport-file` flag.
Metrics gathered through training:
- `train.loss` - training loss
- `train.total_ips` - training speed measured in images/second
- `train.compute_ips` - training speed measured in images/second, not counting data loading
- `train.data_time` - time spent on waiting on data
- `train.compute_time` - time spent in forward/backward pass
To restart training from a checkpoint, use the `--resume` option.
To start training from pretrained weights (e.g. downloaded from NGC), use the `--pretrained-from-file` option.
The difference between the two is that pretrained weights contain only the model weights,
while checkpoints, apart from the model weights, also contain the optimizer state and the LR scheduler state.
Checkpoints are suitable for splitting the training into parts, for example in order
to divide the training job into shorter stages, or to restart training after an infrastructure failure.
Pretrained weights can be used as a base for fine-tuning the model on a different dataset,
or as a backbone for detection models.
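For example (illustrative paths and dataset; adjust hyperparameters to your setup):
```bash
# resume an interrupted run from the most recent checkpoint
python ./multiproc.py --nproc_per_node 8 ./main.py --arch resnext101-32x4d --resume /results/checkpoint.pth.tar --workspace /results /imagenet

# start fine-tuning from pretrained weights on a different dataset
python ./multiproc.py --nproc_per_node 8 ./main.py --arch resnext101-32x4d --pretrained-from-file nvidia_resnext101-32x4d_200821.pth.tar --num_classes 10 /my_dataset
```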
### Inference process
Validation is done every epoch and can also be run separately on a checkpointed model.
`python ./main.py --arch resnext101-32x4d --evaluate --epochs 1 --resume <path to checkpoint> -b <batch size> <path to imagenet>`
Metrics gathered through evaluation:
- `val.loss` - validation loss
- `val.top1` - validation top1 accuracy
- `val.top5` - validation top5 accuracy
- `val.total_ips` - inference speed measured in images/second
- `val.compute_ips` - inference speed measured in images/second, not counting data loading
- `val.data_time` - time spent on waiting on data
- `val.compute_time` - time spent on inference
To run inference on JPEG image, you have to first extract the model weights from checkpoint:
`python checkpoint2model.py --checkpoint-path <path to checkpoint> --weight-path <path where weights will be stored>`
Then run classification script:
`python classify.py --arch resnext101-32x4d --pretrained-from-file <path to weights from previous step> --precision AMP|FP32 --image <path to JPEG image>`
You can also run ImageNet validation on pretrained weights:
`python ./main.py --arch resnext101-32x4d --evaluate --epochs 1 --pretrained-from-file <path to pretrained weights> -b <batch size> <path to imagenet>`
#### NGC Pretrained weights:
Pretrained weights can be downloaded from NGC:
```bash
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/resnext101_32x4d_pyt_amp/versions/20.06.0/zip -O resnext101_32x4d_pyt_amp_20.06.0.zip
unzip resnext101_32x4d_pyt_amp_20.06.0.zip
```
To run inference on ImageNet, run:
`python ./main.py --arch resnext101-32x4d --evaluate --epochs 1 --pretrained-from-file nvidia_resnext101-32x4d_200821.pth.tar -b <batch size> <path to imagenet>`
To run inference on JPEG image using pretrained weights:
`python classify.py --arch resnext101-32x4d --pretrained-from-file nvidia_resnext101-32x4d_200821.pth.tar --precision AMP|FP32 --image <path to JPEG image>`
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark training, run:
* For 1 GPU
* FP32 (V100 GPUs only)
`python ./launch.py --model resnext101-32x4d --precision FP32 --mode benchmark_training --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* TF32 (A100 GPUs only)
`python ./launch.py --model resnext101-32x4d --precision TF32 --mode benchmark_training --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* AMP
`python ./launch.py --model resnext101-32x4d --precision AMP --mode benchmark_training --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* For multiple GPUs
* FP32 (V100 GPUs only)
`python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnext101-32x4d --precision FP32 --mode benchmark_training --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* TF32 (A100 GPUs only)
`python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnext101-32x4d --precision TF32 --mode benchmark_training --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* AMP
`python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnext101-32x4d --precision AMP --mode benchmark_training --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
Each of these scripts will run 100 iterations and save results in the `benchmark.json` file.
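The report can then be inspected with a few lines of Python; the exact layout of `benchmark.json` is an assumption here and may differ between releases.
```python
import json

# Pretty-print the collected training benchmark results for a quick look.
with open("benchmark.json") as f:
    report = json.load(f)
print(json.dumps(report, indent=2))
```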
#### Inference performance benchmark
To benchmark inference, run:
* FP32 (V100 GPUs only)
`python ./launch.py --model resnext101-32x4d --precision FP32 --mode benchmark_inference --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* TF32 (A100 GPUs only)
`python ./launch.py --model resnext101-32x4d --precision TF32 --mode benchmark_inference --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* AMP
`python ./launch.py --model resnext101-32x4d --precision AMP --mode benchmark_inference --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
Each of these scripts will run 100 iterations and save results in the `benchmark.json` file.
### Results
#### Training accuracy results
Our results were obtained by running the applicable training script in the pytorch-20.12 NGC container.
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
| **Epochs** | **Mixed Precision Top1** | **TF32 Top1** |
|:----------:|:------------------------:|:--------------:|
| 90 | 79.47 +/- 0.03 | 79.38 +/- 0.07 |
| 250 | 80.19 +/- 0.08 | 80.27 +/- 0.1 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
| **Epochs** | **Mixed Precision Top1** | **FP32 Top1** |
|:----------:|:------------------------:|:--------------:|
| 90 | 79.49 +/- 0.05 | 79.40 +/- 0.10 |
| 250 | 80.26 +/- 0.11 | 80.06 +/- 0.06 |
##### Example plots
The following images show a 250-epoch configuration on a DGX-1V.
![ValidationLoss](./img/loss_plot.png)
![ValidationTop1](./img/top1_plot.png)
![ValidationTop5](./img/top5_plot.png)
#### Training performance results
Our results were obtained by running the applicable training script in the pytorch-21.03 NGC container.
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
| **GPUs** | **Throughput - TF32** | **Throughput - mixed precision** | **Throughput speedup (TF32 to mixed precision)** | **TF32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **TF32 Training Time (90E)** |
|:--------:|:---------------------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:|
| 1 | 456 img/s | 1211 img/s | 2.65 x | 1.0 x | 1.0 x | ~28 hours | ~74 hours |
| 8 | 3471 img/s | 7925 img/s | 2.28 x | 7.6 x | 6.54 x | ~5 hours | ~10 hours |
##### Training performance: NVIDIA DGX-1 16GB (8x V100 16GB)
| **GPUs** | **Throughput - FP32** | **Throughput - mixed precision** | **Throughput speedup (FP32 to mixed precision)** | **FP32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **FP32 Training Time (90E)** |
|:--------:|:---------------------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:|
| 1 | 147 img/s | 587 img/s | 3.97 x | 1.0 x | 1.0 x | ~58 hours | ~228 hours |
| 8 | 1133 img/s | 4065 img/s | 3.58 x | 7.65 x | 6.91 x | ~9 hours | ~30 hours |
##### Training performance: NVIDIA DGX-1 32GB (8x V100 32GB)
| **GPUs** | **Throughput - FP32** | **Throughput - mixed precision** | **Throughput speedup (FP32 to mixed precision)** | **FP32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **FP32 Training Time (90E)** |
|:--------:|:---------------------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:|
| 1 | 144 img/s | 565 img/s | 3.9 x | 1.0 x | 1.0 x | ~60 hours | ~233 hours |
| 8 | 1108 img/s | 3863 img/s | 3.48 x | 7.66 x | 6.83 x | ~9 hours | ~31 hours |
#### Inference performance results
Our results were obtained by running the applicable training script in the pytorch-21.03 NGC container.
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
###### FP32 Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 55 img/s | 17.95 ms | 20.61 ms | 22.0 ms |
| 2 | 105 img/s | 19.2 ms | 20.74 ms | 22.77 ms |
| 4 | 170 img/s | 23.65 ms | 24.66 ms | 28.0 ms |
| 8 | 336 img/s | 24.05 ms | 24.92 ms | 27.75 ms |
| 16 | 397 img/s | 40.77 ms | 40.44 ms | 40.65 ms |
| 32 | 452 img/s | 72.12 ms | 71.1 ms | 71.35 ms |
| 64 | 500 img/s | 130.9 ms | 128.19 ms | 128.64 ms |
| 128 | 527 img/s | 249.57 ms | 242.77 ms | 243.63 ms |
| 256 | 533 img/s | 496.76 ms | 478.04 ms | 480.42 ms |
###### Mixed Precision Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 43 img/s | 23.08 ms | 24.18 ms | 27.82 ms |
| 2 | 84 img/s | 23.65 ms | 24.64 ms | 27.87 ms |
| 4 | 164 img/s | 24.38 ms | 27.33 ms | 27.95 ms |
| 8 | 333 img/s | 24.18 ms | 25.92 ms | 28.3 ms |
| 16 | 640 img/s | 25.4 ms | 26.53 ms | 29.47 ms |
| 32 | 1195 img/s | 27.72 ms | 29.9 ms | 32.19 ms |
| 64 | 1595 img/s | 41.89 ms | 40.15 ms | 41.08 ms |
| 128 | 1699 img/s | 79.45 ms | 75.65 ms | 76.08 ms |
| 256 | 1746 img/s | 154.68 ms | 145.76 ms | 146.52 ms |
##### Inference performance: NVIDIA T4
###### FP32 Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 56 img/s | 18.18 ms | 20.45 ms | 24.58 ms |
| 2 | 109 img/s | 18.77 ms | 21.53 ms | 26.21 ms |
| 4 | 151 img/s | 26.89 ms | 27.81 ms | 30.94 ms |
| 8 | 164 img/s | 48.99 ms | 49.44 ms | 49.91 ms |
| 16 | 172 img/s | 93.51 ms | 93.73 ms | 94.16 ms |
| 32 | 180 img/s | 178.83 ms | 178.41 ms | 179.07 ms |
| 64 | 178 img/s | 361.95 ms | 360.7 ms | 362.32 ms |
| 128 | 172 img/s | 756.93 ms | 750.21 ms | 752.45 ms |
| 256 | 161 img/s | 1615.79 ms | 1580.61 ms | 1583.43 ms |
###### Mixed Precision Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 44 img/s | 23.0 ms | 25.77 ms | 29.41 ms |
| 2 | 87 img/s | 23.14 ms | 26.55 ms | 30.97 ms |
| 4 | 178 img/s | 22.8 ms | 24.2 ms | 29.38 ms |
| 8 | 371 img/s | 21.98 ms | 25.34 ms | 29.61 ms |
| 16 | 553 img/s | 29.47 ms | 29.52 ms | 31.14 ms |
| 32 | 578 img/s | 56.56 ms | 56.04 ms | 56.37 ms |
| 64 | 591 img/s | 110.82 ms | 109.37 ms | 109.83 ms |
| 128 | 597 img/s | 220.44 ms | 215.33 ms | 216.3 ms |
| 256 | 598 img/s | 439.3 ms | 428.2 ms | 431.46 ms |
## Release notes
### Changelog
1. October 2019
* Initial release
2. July 2020
* Added A100 scripts
* Updated README
3. February 2021
* Moved from APEX AMP to Native AMP
### Known issues
There are no known issues with this model.
|
PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit | deployment_toolkit | core | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib.util
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
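# Names of functions that user-supplied model/dataloader definition files are expected
# to expose; they are looked up dynamically, e.g. via load_from_file() defined below.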
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
class Accelerator(Parameter):
AMP = "amp"
CUDA = "cuda"
TRT = "trt"
class Precision(Parameter):
FP16 = "fp16"
FP32 = "fp32"
TF32 = "tf32" # Deprecated
class Format(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
ONNX = "onnx"
TRT = "trt"
TS_SCRIPT = "ts-script"
TS_TRACE = "ts-trace"
PYT = "pyt"
class Model(NamedTuple):
handle: object
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
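    """Dynamically import the module at `file_path` under the name `label` and return its `target` attribute (None if absent)."""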
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path]) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
    def _set_env_variables(self) -> Dict[str, object]:
        """Set session environment variables and return their previous values; note that nothing is removed here."""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
|
TensorFlow/Translation/GNMT/variable_mgr | variable_mgr | variable_mgr_util | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for VariableMgr."""
from __future__ import print_function
import collections as pycoll
import operator
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import gradients_impl
PS_SHADOW_VAR_PREFIX = 'ps_var'
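# Variables whose names start with this prefix are shadow copies of parameter-server
# variables; the OverrideToLocalVariableIfNotPsVar getter below leaves them untouched.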
AutoLossScaleParams = pycoll.namedtuple(
'AutoLossScaleParams',
[
# If true, enable automatic loss scaling.
'enable_auto_loss_scale',
# The value to scale the loss before computing gradients.
'loss_scale',
# Number of normal steps with the current `loss_scale`.
'loss_scale_normal_steps',
# Increase loss scale every n steps.
'inc_loss_scale_every_n',
# If true, the current worker is chief. The current implementation
# relies on the chief to update loss_scale value, but in future, we
# might change this to ask the parameter server to update loss_scales
# for better performance.
# TODO(tanmingxing): remove this if loss_scale is updated in ps.
'is_chief',
])
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n):
"""Returns the update op for loss scaling variables.
We maintain the counter `loss_scale_normal_steps` to count the number of steps
we have been using the current `loss_scale`. In most cases, this function
increments `loss_scale_normal_steps`. However, if `loss_scale_normal_steps` is
greater than the threshold `inc_loss_scale_every_n`, we double `loss_scale`
and reset `loss_scale_normal_steps` to zero.
This op is only called if the gradients don't have any infs or nans. Instead,
  if infs or nans occur in the gradients, we immediately halve `loss_scale` and
reset `loss_scale_normal_steps` to zero.
Args:
    loss_scale: a tf.Variable representing the loss_scale value.
loss_scale_normal_steps: a tf.Variable representing the number of training
steps that have run since the loss_scale last changed.
inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
increased every `inc_loss_scale_every_n` steps, unless the gradients have
infs or nans.
Returns:
An op for updating `loss_scale` and `loss_scale_normal_steps`.
"""
def increment_loss_scale_normal_steps_func():
return tf.group(loss_scale_normal_steps.assign_add(1))
def increase_loss_scale_func():
return tf.group(
tf.assign(loss_scale_normal_steps, 0),
tf.assign(loss_scale, loss_scale * 2))
# true_fn and false_fn must have the same type.
return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
increment_loss_scale_normal_steps_func,
increase_loss_scale_func)
def append_gradients_with_loss_scale(training_ops, get_apply_gradients_ops_func,
loss_scale_params, grad_has_inf_nan):
"""Selectively appends gradients update ops with loss scaling.
Args:
training_ops: a list of training ops to be executed.
get_apply_gradients_ops_func: a function that returns a list of ops for
applying gradients. Here, we must pass a function instead of the actual
list of ops; otherwise, those ops would be executed unconditionally due to
the semantics of tf.cond.
loss_scale_params: An AutoLossScaleParams tuple.
grad_has_inf_nan: Boolean tensor indicating whether the gradients have infs
or nans.
"""
is_chief = loss_scale_params.is_chief
loss_scale = loss_scale_params.loss_scale
loss_scale_normal_steps = loss_scale_params.loss_scale_normal_steps
inc_loss_scale_every_n = loss_scale_params.inc_loss_scale_every_n
enable_auto_loss_scale = loss_scale_params.enable_auto_loss_scale
if loss_scale is None or not enable_auto_loss_scale or not is_chief:
training_ops.extend(get_apply_gradients_ops_func())
else:
# If nans/infs occurred, skip applying gradients and instead update
# loss_scale (halve loss_scale and reset loss_scale_normal_steps to zero).
def update_op_if_nan_or_inf():
"""Update loss_scale and discard gradients if nans/infs occurred."""
return tf.group(
tf.assign(loss_scale, loss_scale / 2.),
tf.assign(loss_scale_normal_steps, 0))
# Otherwise, apply gradients, and update loss_scale and
# loss_scale_normal_steps.
def update_op_if_no_nan_or_inf():
"""Apply gradients, and update loss scaling."""
return tf.group(
get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n),
*get_apply_gradients_ops_func())
# TODO(tanmingxing): Add support for independent and distributed all_reduce.
assert grad_has_inf_nan is not None
update_op = tf.cond(
grad_has_inf_nan,
update_op_if_nan_or_inf,
update_op_if_no_nan_or_inf,
name='cond_if_grad_has_inf_nan'
)
training_ops.append(update_op)
# To be used with custom_getter on tf.get_variable.
class OverrideCachingDevice(object):
"""Variable getter which caches variables on the least loaded device.
Variables smaller than a certain threshold are cached on a single specific
device, as specified in the constructor. All other variables are load balanced
across a pool of devices, by caching each variable on the least loaded device.
  Note that variable creation only happens when building the model graph on the
first device (see how it sets the 'reuse' parameter in
VariableMgr.*.create_outer_variable_scope()). That means, for all other
devices, the variable scope will reuse the variables created before, which
requires that we set the caching_device correctly as otherwise it may not be
able to find the previously created variable and will create a new one. This
  requires that, when building the model graph on different devices, variables with
  the same name have the same size.
TODO(laigd): consider adding tests or verification logic to enforce this, or
refactor it.
"""
def __init__(self, devices, device_for_small_variables,
small_variable_size_threshold):
self.devices = devices
self.sizes = [0] * len(self.devices)
self.device_for_small_variables = device_for_small_variables
self.small_variable_size_threshold = small_variable_size_threshold
def __call__(self, getter, *args, **kwargs):
size = tf.TensorShape(kwargs['shape']).num_elements()
if size < self.small_variable_size_threshold:
device_name = self.device_for_small_variables
else:
device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
device_name = self.devices[device_index]
self.sizes[device_index] += size
kwargs['caching_device'] = device_name
var = getter(*args, **kwargs)
return var
# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in the LOCAL_VARIABLES and not the GLOBAL_VARIABLES collection.
class OverrideToLocalVariableIfNotPsVar(object):
# args and kwargs come from the custom_getter interface for Tensorflow
# variables, and matches tf.get_variable's signature, with the addition of
# 'getter' at the beginning.
def __call__(self, getter, name, *args, **kwargs):
if name.startswith(PS_SHADOW_VAR_PREFIX):
return getter(*args, **kwargs)
if 'collections' in kwargs:
collections = kwargs['collections']
if not collections:
collections = [tf.GraphKeys.GLOBAL_VARIABLES]
else:
collections = collections[:]
collections.remove(tf.GraphKeys.GLOBAL_VARIABLES)
collections.append(tf.GraphKeys.LOCAL_VARIABLES)
kwargs['collections'] = list(collections)
return getter(name, *args, **kwargs)
class ParamServerDeviceSetter(object):
"""Helper class to assign variables on the least loaded ps-device."""
  def __init__(self, worker_device, ps_devices):
    """Initializer for ParamServerDeviceSetter.
Args:
      worker_device: the device to use for compute ops.
ps_devices: a list of device to use for Variable ops. Each variable is
assigned to the least loaded device.
"""
self.ps_devices = ps_devices
self.worker_device = worker_device
self.ps_sizes = [0] * len(self.ps_devices)
def __call__(self, op):
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return self.worker_device
device_index, _ = min(enumerate(self.ps_sizes), key=operator.itemgetter(1))
device_name = self.ps_devices[device_index]
var_size = op.outputs[0].get_shape().num_elements()
self.ps_sizes[device_index] += var_size
return device_name
class StagedModelVariable(object):
"""Staging variable wrapper that decouples reads and updates.
  This class represents a variable through a staging buffer. Reads from this
  variable are served directly from the staging buffer. Updates are stacked into
  another staging buffer and will be processed later.
"""
def __init__(self, real_var, var_stage_get, variable_mgr):
"""Initializer for the model variables through a staging buffer.
Args:
real_var: the underlying real variable.
var_stage_get: the read op from the staging buffer.
variable_mgr: the parent variable-manager.
"""
self.real_var = real_var
self.var_stage_get = var_stage_get
self.variable_mgr = variable_mgr
def _value(self):
"""The read access of this variable. The content from the staging buffer."""
return self.var_stage_get
def _ref(self):
"""Return the underlying variable ref, required by tf.colocate_with."""
return self.real_var._ref() # pylint: disable=protected-access
def read_value(self):
"""Mimics tf.Variable.read_value()."""
return tf.identity(self.var_stage_get, name='read')
@property
def dtype(self):
"""Return the non-reference dtype."""
return self.var_stage_get.dtype
def assign_sub(self, delta, name=None):
"""Mimic the updates to the variable.
Args:
delta: is pushed into a staging buffer and will be pumped later.
      name: currently ignored; the names of the ops and the StagingArea are
        computed without using the passed name.
Returns:
The actual updates. The colocation constraint will be reapplied.
"""
# This parameter is ignored: the StagingArea only supports setting
# the shared name, not the names of individual ops it uses.
del name
# colocate_with(None, True) clears the colocation constraints.
# Push the delta into a staging buffer.
with ops.colocate_with(None, True), tf.device(self.var_stage_get.device):
delta_staging_area = tf.contrib.staging.StagingArea(
[self.var_stage_get.dtype], shapes=[self.var_stage_get.shape])
delta_put_op = delta_staging_area.put([delta])
self.variable_mgr.staging_delta_ops.append(delta_put_op)
delta_get_op = delta_staging_area.get()[0]
# Return the actual updates. The colocation constraint will be reapplied.
return self.real_var.assign_sub(delta_get_op)
@staticmethod
# pylint: disable=bad-staticmethod-argument,invalid-name
def _TensorConversionFunction(self, dtype=None, name=None, as_ref=False):
"""Utility function for converting a StagedModelVariable to a Tensor."""
del dtype, name # unused: this function returns the cached ref or value.
if as_ref:
return self._ref()
else:
return self._value()
ops.register_tensor_conversion_function(
StagedModelVariable, StagedModelVariable._TensorConversionFunction) # pylint: disable=protected-access
class StagedVariableGetter(object):
"""A variable getter through staging buffers on devices.
  Instead of a caching device, this getter tracks where the variable is used,
  and on each device it goes through a staging buffer.
"""
def __init__(self, device_num, devices, cpu_device, variable_mgr):
"""Initializer for StagedVariableGetter.
Args:
device_num: the current device index.
devices: a list of all the devices to build towers.
cpu_device: a cpu_device for this replica. If None, no cpu-caching is
done.
variable_mgr: the parent variable manager.
"""
self.device_num = device_num
self.devices = devices
self.cpu_device = cpu_device
self.variable_mgr = variable_mgr
def __call__(self, getter, name, *args, **kwargs):
staging_ops = self.variable_mgr.staging_vars_on_devices[self.device_num]
if name in staging_ops:
put_op, get_op = staging_ops[name]
return get_op
real_var = getter(name, *args, **kwargs)
shape = kwargs['shape']
dtype = kwargs['dtype']
trainable = kwargs['trainable']
if self.cpu_device:
with tf.device(self.cpu_device):
# This helps copying the weights from the parameter to this server only
# once.
if name in self.variable_mgr.staged_vars_on_cpu:
cpu_var = self.variable_mgr.staged_vars_on_cpu[name]
else:
cpu_var = tf.identity(real_var)
self.variable_mgr.staged_vars_on_cpu[name] = cpu_var
var_to_stage = cpu_var
else:
var_to_stage = tf.identity(real_var) # de-reference the variable.
with tf.device(self.devices[self.device_num]):
staging_area = tf.contrib.staging.StagingArea([dtype], shapes=[shape])
put_op = staging_area.put([var_to_stage])
get_op = staging_area.get()[0]
staging_ops[name] = (put_op, get_op)
if trainable:
      # For trainable variables, they are managed separately through
# apply_gradients.
return get_op
else:
# For other shadow variables, the access is decoupled through a wrapper
# class.
return StagedModelVariable(real_var, get_op, self.variable_mgr)
def trainable_variables_on_device(self, rel_device_num, abs_device_num,
writable):
"""Return the set of trainable variables on the specified device.
Args:
rel_device_num: local worker device index.
abs_device_num: global graph device index.
writable: whether the returned variables is writable or read-only.
Returns:
Return the set of trainable variables on the specified device.
"""
del abs_device_num
params_refs = tf.trainable_variables()
if writable:
return params_refs
params = []
for param in params_refs:
var_name = param.name.split(':')[0]
_, var_get_op = self.variable_mgr.staging_vars_on_devices[rel_device_num][
var_name]
params.append(var_get_op)
return params
def aggregate_gradients_using_copy_with_device_selection(
benchmark_cnn, tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, controlling device for the aggregation.
Args:
benchmark_cnn: benchmark_cnn class.
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf value indicates whether the grads have nans or infs.
"""
if benchmark_cnn.local_parameter_device_flag == 'gpu':
avail_devices = benchmark_cnn.raw_devices
else:
avail_devices = [benchmark_cnn.param_server_device]
agg_grads = []
has_nan_or_inf_list = []
for i, single_grads in enumerate(zip(*tower_grads)):
with tf.device(avail_devices[i % len(avail_devices)]):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy_with_variable_colocation(
tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, colocating computation with the gradient's variable.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients. All variables
of the same gradient across towers must be the same (that is,
tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf value indicates whether the grads have nans or infs.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
# Note that each single_grads looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
var = single_grads[0][1]
for _, v in single_grads:
assert v == var
with tf.device(var.device):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy(tower_grads, use_mean, check_inf_nan):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf value indicates whether the grads have nans or infs.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single tower, and the number of pairs
equals the number of towers.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
    the first tower. The has_nan_or_inf value indicates whether the grads have nans or infs.
"""
grads = [g for g, _ in grad_and_vars]
if any(isinstance(g, tf.IndexedSlices) for g in grads):
# TODO(reedwm): All-reduce IndexedSlices more effectively.
grad = gradients_impl._AggregateIndexedSlicesGradients(grads) # pylint: disable=protected-access
else:
grad = tf.add_n(grads)
if use_mean and len(grads) > 1:
grad = tf.scalar_mul(1.0 / len(grads), grad)
v = grad_and_vars[0][1]
if check_inf_nan:
with tf.name_scope('check_for_inf_and_nan'):
has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
|
TensorFlow/Detection/SSD/models/research/object_detection/data | data | face_label_map | item {
name: "face"
id: 1
display_name: "face"
}
|
PyTorch/DrugDiscovery/MoFlow/moflow/runtime | runtime | common | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from glob import glob
import logging
import os
from typing import List, Optional, Tuple
import torch
from moflow.model.model import MoFlow
CHECKPOINT_PATTERN = 'model_snapshot_epoch_%s'
def _sort_checkpoints(paths: List[str]) -> List[str]:
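    """Sort checkpoint paths by the epoch number encoded in the filename."""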
return sorted(paths, key=lambda x: int(x.split('_')[-1]))
def save_state(dir: str, model: MoFlow, optimizer: torch.optim.Optimizer, ln_var: float, epoch: int, keep: int = 1) -> None:
"""Save training state in a given dir. This checkpoint can be used to resume training or run inference
with the trained model. This function will keep up to <keep> newest checkpoints and remove the oldest ones.
"""
save_path = os.path.join(dir, CHECKPOINT_PATTERN % (epoch + 1))
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'ln_var': ln_var,
'epoch': epoch,
}
torch.save(state, save_path)
if keep > 0:
filenames = glob(os.path.join(dir, CHECKPOINT_PATTERN % '*'))
if len(filenames) <= keep:
return
to_del = _sort_checkpoints(filenames)[:-keep]
for path in to_del:
os.remove(path)
def load_state(path: str, model: MoFlow, device: torch.device, optimizer: Optional[torch.optim.Optimizer] = None) -> Tuple[int, float]:
"""Load model's and optimizer's state from a given file.
    This function returns the number of epochs the model was trained for and the natural logarithm
    of the variance of the latent-space distribution.
"""
state = torch.load(path, map_location=device)
model.load_state_dict(state['model'])
if optimizer is not None:
optimizer.load_state_dict(state['optimizer'])
return state['epoch'], state['ln_var']
def get_newest_checkpoint(model_dir: str, validate: bool = True) -> str:
"""Find newest checkpoint in a given directory.
If validate is set to True, this function will also verify that the file can be loaded and
    select an older checkpoint if necessary.
"""
filenames = glob(os.path.join(model_dir, CHECKPOINT_PATTERN % '*'))
if len(filenames) == 0:
logging.info(f'No checkpoints available')
return None
paths = _sort_checkpoints(filenames)
if validate:
for latest_path in paths[::-1]:
try:
torch.load(latest_path, map_location='cpu')
break
except:
logging.info(f'Checkpoint {latest_path} is corrupted')
else:
logging.info(f'All available checkpoints were corrupted')
return None
else:
latest_path = paths[-1]
logging.info(f'Found checkpoint {latest_path}')
return latest_path
|
TensorFlow2/Segmentation/MaskRCNN | MaskRCNN | README | # Mask-RCNN For TensorFlow 2
This repository provides a script and recipe to train the Mask-RCNN model to achieve state-of-the-art accuracy and is tested and maintained by NVIDIA.
## Table Of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [TF32](#tf32)
* [Glossary](#glossary)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Multi-dataset](#multi-dataset)
* [Training process](#training-process)
* [Inference process](#inference-process)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Training stability test](#training-stability-test)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-1x-a100-80gb)
* [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
Mask R-CNN is a convolution-based neural network for the task of object instance segmentation. The paper describing the model can be found [here](https://arxiv.org/abs/1703.06870). NVIDIA’s Mask R-CNN is an optimized version of [Google's TPU implementation](https://github.com/tensorflow/tpu/tree/master/models/official/mask_rcnn), leveraging mixed precision arithmetic using Tensor Cores while maintaining target accuracy.
This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 2.2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
This repository also contains scripts to interactively launch training, benchmarking and inference routines in a Docker container.
The major differences between the official implementation of the paper and our version of Mask R-CNN are as follows:
- Mixed precision support with [TensorFlow AMP](https://docs.nvidia.com/deeplearning/frameworks/tensorflow-user-guide/index.html#tfamp)
- Gradient accumulation to simulate larger batches
- Custom fused CUDA kernels for faster computations
There are other publicly NVIDIA available implementations of Mask R-CNN:
- [NVIDIA PyTorch implementation](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Segmentation/MaskRCNN)
- [Matterport](https://github.com/matterport/Mask_RCNN)
- [Tensorpack](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN)
### Model architecture
Mask R-CNN builds on top of Faster R-CNN adding a mask head for the task of image segmentation.
The architecture consists of the following:
- ResNet-50 backbone with Feature Pyramid Network (FPN)
- Region proposal network (RPN) head
- RoI Align
- Bounding and classification box head
- Mask head
![Architecture](documentation/architecture.png)
Figure 1. Diagram of Mask R-CNN framework from [original paper](https://arxiv.org/abs/1703.06870)
### Default configuration
The Mask R-CNN configuration and the hyper-parameters for training and testing purposes are in separate files.
The default configuration of this model can be found at `mrcnn_tf2/config.py`.
The default configuration is as follows:
- Feature extractor:
- Images resized with aspect ratio maintained and smaller side length between [832,1344]
- Ground Truth mask size 112
- Backbone network weights are frozen after second epoch
- RPN:
- Anchor stride set to 16
- Anchor sizes set to (32, 64, 128, 256, 512)
- Foreground IOU Threshold set to 0.7, Background IOU Threshold set to 0.3
- RPN target fraction of positive proposals set to 0.5
- Train Pre-NMS Top proposals set to 2000 per FPN layer
- Train Post-NMS Top proposals set to 1000
- Test Pre-NMS Top proposals set to 1000 per FPN layer
- Test Post-NMS Top proposals set to 1000
- RPN NMS Threshold set to 0.7
- RoI heads:
- Foreground threshold set to 0.5
- Batch size per image set to 512
- A positive fraction of batch set to 0.25
The default hyper-parameters can be found at `mrcnn_tf2/arguments.py`.
These hyperparameters can be overridden through command-line options in the launch scripts.
### Feature support matrix
The following features are supported by this model:
| **Feature** | **Mask R-CNN** |
-------------|---------------------|
| Automatic mixed precision (AMP) | Yes |
| Accelerated Linear Algebra (XLA)| Yes |
#### Features
**Automatic Mixed Precision (AMP)**
This implementation of Mask-RCNN uses AMP to implement mixed precision training. It allows us to use FP16 training with FP32 master weights by modifying just a few lines of code.
**XLA support (experimental)**
XLA is a domain-specific compiler for linear algebra that can accelerate TensorFlow models with potentially no source code changes. The results are improvements in speed and memory usage: most internal benchmarks run ~1.1-1.5x faster after XLA is enabled.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta, Turing, and NVIDIA Ampere GPU architectures automatically. The TensorFlow framework code makes all necessary model changes internally.
In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling.
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide.
- APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
Mixed precision is enabled in TensorFlow by using the Automatic Mixed Precision (TF-AMP) extension which casts variables to half-precision upon retrieval, while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In TensorFlow, loss scaling can be applied statically by using simple multiplication of loss by a constant value or automatically, by TF-AMP. Automatic mixed precision makes all the adjustments internally in TensorFlow, providing two benefits over manual operations. First, programmers need not modify network model code, reducing development and maintenance effort. Second, using AMP maintains forward and backward compatibility with all the APIs for defining and running TensorFlow models.
To enable mixed precision, you can simply set the following environment variables inside your training script:
- Enable TF-AMP graph rewrite:
```
os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1"
```
- Enable Automated Mixed Precision:
```
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
```
#### TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
## Setup
The following section lists the requirements that you need to meet in order to start training the Mask R-CNN model.
### Requirements
This repository contains a Dockerfile that extends the TensorFlow 2 NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
- [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
- TensorFlow 21.02 NGC container
- Supported GPUs:
- [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
- [NVIDIA Turing architecture](https://www.nvidia.com/en-us/design-visualization/technologies/turing-architecture/)
- [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation:
- [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
- [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry)
For those unable to use the TensorFlow 2 NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the Mask R-CNN model on the COCO 2017 dataset. For the specifics concerning training and inference, see the [Advanced](#advanced) section.
1. Clone the repository.
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/TensorFlow2/Segmentation/MaskRCNN
```
2. Build the Mask R-CNN TensorFlow NGC container.
```bash
nvidia-docker build -t nvidia_mrcnn_tf2 .
```
3. Start an interactive session in the NGC container to run training/inference.
Run the following command to launch the Docker container.
```bash
docker run --gpus all -it --rm --shm-size=2g --ulimit memlock=-1 --ulimit stack=67108864 nvidia_mrcnn_tf2
```
If you want to reuse the dataset and pretrained ResNet-50 weights between runs **(recommended)**, use `-v [data directory]:/data -v [weights directory]:/weights` to mount your directories inside the container:
```bash
docker run --gpus all -it --rm --shm-size=2g --ulimit memlock=-1 --ulimit stack=67108864 -v [data directory]:/data -v [weights directory]:/weights nvidia_mrcnn_tf2
```
The contents of `/data` and `/weights` will be downloaded in the following steps.
4. Download and preprocess the dataset.
This repository provides scripts to download and extract the [COCO 2017 dataset](http://cocodataset.org/#download).
If you already have the data, then you do not need to run the following script; instead proceed to the next step.
Data will be downloaded to the `[data directory]` directory provided in step 3.
```bash
cd dataset
bash download_and_preprocess_coco.sh /data
```
5. Download the pre-trained ResNet-50 weights.
This repository also provides scripts to download the pre-trained weights of ResNet-50 backbone.
The following script will download the pre-trained weights to `/weights`.
```bash
python scripts/download_weights.py --save_dir=/weights
```
6. Start training.
To run training with a default configuration (on 1 or 8 GPUs, with or without AMP), run the `scripts/train.py` script:
```bash
python scripts/train.py --gpus {1,8} [--amp]
```
The above script trains a model and evaluates it on the COCO 2017 dataset using the contents of the `/data` and `/weights` directories. Refer to the [Advanced](#advanced) section or run `python scripts/train.py --help` for more details.
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
Descriptions of the key scripts and folders are provided below.
- `mrcnn_tf2` - Contains source code of this model.
- `main.py` - This is the entry point that provides advanced configuration of training and evaluation processes.
- `scripts/` - A folder with utility scripts that simplifies running of this model.
- `train.py` - Runs training followed by evaluation.
- `evaluate.py` - Runs evaluation.
- `inference.py` - Runs inference.
- `benchmark_training.py` - Script for running train performance benchmarks.
- `benchmark_inference.py` - Script for running inference performance benchmarks.
  - `download_weights.py` - Can be used to download pre-trained weights for backbone models.
- `dataset/` - A folder that contains shell scripts and Python files to download the dataset.
### Parameters
Below you will find a description of the most important parameters accepted by scripts. See [Command-line options](#command-line-options) for list of all available options.
#### Utility script parameters
All the scripts in the `scripts/` directory accept the following parameters:
- `--batch_size N`- Size of the training or evaluation batch size (depends on the script).
- `--amp` - When provided, enables automatic mixed precision.
- `--no_xla` - When provided, disables XLA (accelerated linear algebra).
- `--data_dir [path]` - Path to the directory that contains TFRecords of COCO 2017. Defaults to `/data`.
- `--model_dir [path]` - Output directory for information related to the model that includes model checkpoints. Defaults to `/tmp/model`.
- `--weights_dir [path]` - Directory containing pre-trained ResNet50 weights. Defaults to `/weights`.
Additionally, training scripts also accept some specific parameters:
- `train.py`
- `--gpus N` - Number of GPUs to use during training.
- `--no_eval` - When provided, disables evaluation after training.
- `benchmark_training.py`
- `--gpus N` - Number of GPUs to use during training.
Note: Any additional flags not specified above will be passed to `python main.py`. Refer to `python main.py --help` for a full list of available flags.
#### Main script parameters
For most use cases, the scripts in `scripts/` should be sufficient, but if you need more control over the model, you can also directly execute `main.py`.
To get the list of all parameters accepted by `main.py`, run `python main.py --help`.
### Command-line options
To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example:
```
python main.py --help
```
The following example output is printed when running the model:
```
usage: main.py MODE [arguments...]
NVIDIA implementation of MaskRCNN for TensorFlow 2.x
Runtime:
MODE One of supported execution modes:
train - run in training mode
eval - run evaluation on eval data split
infer - run inference on eval data split
--data_dir DIR Input directory containing the dataset (default: /data)
--model_dir DIR Output directory for information related to the model (default: /results)
--backbone_checkpoint FILE Pretrained checkpoint for resnet (default: /weights/rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp)
--eval_file FILE Path to the validation json file (default: /data/annotations/instances_val2017.json)
--epochs EPOCHS Number of training epochs (default: 12)
--steps_per_epoch STEPS_PER_EPOCH Number of steps (batches) per epoch. Defaults to dataset size divided by batch size. (default: None)
--eval_samples N Number of evaluation samples (default: None)
Hyperparameters:
--train_batch_size N Batch size (per GPU) used during training (default: 4)
--eval_batch_size N Batch size used during evaluation (default: 8)
--seed SEED Set a constant seed for reproducibility (default: None)
--l2_weight_decay L2D Weight of l2 regularization (default: 0.0001)
--init_learning_rate LR Initial learning rate (default: 0.0)
--learning_rate_values [D [D ...]] Learning rate decay levels that are then scaled by global batch size (default: [0.01, 0.001, 0.0001])
--learning_rate_boundaries [N [N ...]] Steps (in epochs) at which learning rate changes (default: [0.3, 8.0, 10.0])
--momentum MOMENTUM Optimizer momentum (default: 0.9)
--finetune_bn Is batchnorm finetuned training mode (default: False)
--use_synthetic_data Use synthetic input data, meant for testing only (default: False)
--xla Enable XLA JIT Compiler (default: False)
--amp Enable automatic mixed precision (default: False)
Logging:
--log_file FILE Output file for DLLogger logs (default: mrcnn-dlll.json)
--log_every N Log performance every N steps (default: 100)
--log_warmup_steps N Number of steps that will be ignored when collecting perf stats (default: 100)
--log_graph Print details about TF graph (default: False)
--log_tensorboard PATH When provided saves tensorboard logs to given dir (default: None)
Utility:
-h, --help Show this help message and exit
-v, --verbose Displays debugging logs (default: False)
--eagerly Runs model in eager mode. Use for debugging only as it reduces performance. (default: False)
```
### Getting the data
The Mask R-CNN model was trained on the COCO 2017 dataset. This dataset comes with a training and validation set.
This repository contains the `./dataset/download_and_preprocess_coco.sh` script which automatically downloads and preprocesses the training and validation sets, saving them to `tfrecord` files.
#### Dataset guidelines
The `tfrecord` files are fed to the model through `tf.data.TFRecordDataset()` to achieve high performance.
First, the images are normalized using predefined, channel-wise values (offset `0.485, 0.456, 0.406`, scale `0.229, 0.224, 0.225`). Then, they are augmented (random vertical flip) and resized/padded. The resizing maintains the original aspect ratio while setting the smaller side length to be between `832` and `1344`.
The bounding boxes and masks are processed accordingly so that they match the processed images.
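For illustration, the channel-wise normalization described above can be expressed in TensorFlow 2 roughly as follows; this is a sketch with illustrative names, not the actual code from `mrcnn_tf2/dataset/dataset.py`:
```python
import tensorflow as tf

CHANNEL_OFFSET = tf.constant([0.485, 0.456, 0.406])
CHANNEL_SCALE = tf.constant([0.229, 0.224, 0.225])

def normalize_image(image):
    # Convert to float32 in [0, 1], then apply the channel-wise offset and scale.
    image = tf.image.convert_image_dtype(image, tf.float32)
    return (image - CHANNEL_OFFSET) / CHANNEL_SCALE
```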
#### Multi-dataset
This implementation is tuned for the COCO 2017 dataset. Using other datasets is possible, but may require changes to the code (data loader) and tuning some hyperparameters (for example, learning rate, number of iterations).
In the current implementation, the data loader works with TFRecord files. If you would like to change the format of the input data, you should substitute the `Dataset` class which you can find in `mrcnn_tf2/dataset/dataset.py`.
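A rough outline of such a substitution is sketched below; the class and method names are illustrative assumptions, so check `mrcnn_tf2/dataset/dataset.py` for the interface the training loop actually expects:
```python
import tensorflow as tf

class CustomDataset:
    """Illustrative loader that reads TFRecord files matching a glob pattern."""

    def __init__(self, file_pattern, batch_size):
        self._file_pattern = file_pattern
        self._batch_size = batch_size

    def train_fn(self):
        files = tf.data.Dataset.list_files(self._file_pattern, shuffle=True)
        dataset = tf.data.TFRecordDataset(files)
        # A real implementation would parse and augment images, boxes and masks here.
        return dataset.batch(self._batch_size).prefetch(tf.data.AUTOTUNE)
```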
### Training process
Training is performed using the `scripts/train.py` script which runs `main.py` with the appropriate flags.
The results are displayed in the console and are saved in `./mrcnn-dll.json` (can be overridden by `--log_file`) in a form of [DLLogger](https://github.com/NVIDIA/dllogger) logs in which you can find:
- Full configuration used during training
- Losses, learning rate and performance metrics for steps
- Final losses
- Average performance metrics
Additionally, checkpoints will be saved to `/tmp/model` (can be overridden by `--model_dir`).
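For example, an 8-GPU mixed-precision run that overrides the default checkpoint and log locations could look like this (paths are placeholders, and the wrapper is assumed to forward these flags to `main.py`):
```bash
python scripts/train.py --gpus 8 --batch_size 4 --amp \
    --model_dir /results/model --log_file /results/mrcnn-dlll.json
```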
### Inference process
Inference is performed using the `scripts/evaluate.py` script which runs `main.py` with the appropriate flags.
The results are displayed in the console and are saved in `./mrcnn-dlll.json` (can be overridden by `--log_file`) in the form of [DLLogger](https://github.com/NVIDIA/dllogger) logs, in which you can find:
- Full configuration used during the evaluation
- Evaluation metrics
- Average performance metrics
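A corresponding evaluation run could look like this (again with placeholder paths and assuming the wrapper forwards `main.py` flags):
```bash
python scripts/evaluate.py --eval_batch_size 8 --amp \
    --model_dir /results/model --log_file /results/mrcnn-eval.json
```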
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To run training benchmarking on a selected number of GPUs with either AMP or TF32/FP32 precision, run the following script:
```bash
python scripts/benchmark_training.py --gpus {1,8} --batch_size {2,4} [--amp]
```
#### Inference performance benchmark
To run inference benchmarking on a single GPU with either AMP or TF32/FP32 precision, run the following script:
```bash
python scripts/benchmark_inference.py --batch_size {2,4,8} [--amp]
```
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
#### Training accuracy results
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `python scripts/train.py --gpus 8 --batch_size 4 [--amp]` training script in the TensorFlow 2.x 21.02-py3 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs.
| GPUs | Batch size / GPU | Precision | Final AP BBox | Final AP Segm | Time to train [h] | Time to train speedup |
|:----:|:----------------:|:---------:|:-------------:|:-------------:|:-----------------:|:---------------------:|
| 8 | 2 | TF32 | 0.3796 | 0.3444 | 4.81 | - |
| 8 | 2 | AMP | 0.3795 | 0.3443 | 3.77 | 1.27 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `python scripts/train.py --gpus 8 --batch_size 2 [--amp]` training script in the TensorFlow 2.x 21.02-py3 NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs.
| GPUs | Batch size / GPU | Precision | Final AP BBox | Final AP Segm | Time to train [h] | Time to train speedup |
|:----:|:----------------:|:---------:|:-------------:|:-------------:|:-----------------:|:---------------------:|
| 8 | 2 | FP32 | 0.3793 | 0.3442 | 11.37 | - |
| 8 | 2 | AMP | 0.3792 | 0.3444 | 9.01 | 1.26 |
**Learning curves**
The following image shows the training loss as a function of iteration for training using DGX A100 (TF32 and TF-AMP) and DGX-1 V100 (FP32 and TF-AMP).
![LearningCurves](documentation/learning_curves.png)
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the `python scripts/benchmark_training.py --gpus {1,8} --batch_size {2,4} [--amp]` training script in the TensorFlow 2.x 21.02-py3 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs. Performance numbers (in images per second) were averaged over 200 steps omitting the first 100 warm-up steps.
| GPUs | Batch size / GPU | Throughput - TF32 [img/s] | Throughput - mixed precision [img/s] | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision |
|:----:|:----------------:|:-------------------------:|:------------------------------------:|:-------------------------------------------:|:-------------------:|:------------------------------:|
| 1 | 2 | 13.44 | 18.26 | 1.35 | - | - |
| 1 | 4 | 18.41 | 28.58 | 1.55 | - | - |
| 8 | 2 | 84.29 | 87.31 | 1.03 | 6.27 | 4.78 |
| 8 | 4 | 103.80 | 114.45 | 1.10 | 5.63 | 4.04 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training performance: NVIDIA DGX-1 (8x V100 16GB)
Our results were obtained by running the `python scripts/benchmark_training.py --gpus {1,8} --batch_size {2,4} [--amp]` training script in the TensorFlow 2.x 21.02-py3 NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs. Performance numbers (in images per second) were averaged over 200 steps omitting the first 100 warm-up steps.
| GPUs | Batch size / GPU | Throughput - FP32 [img/s] | Throughput - mixed precision [img/s] | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision |
|:----:|:----------------:|:-------------------------:|:------------------------------------:|:-------------------------------------------:|:-------------------:|:------------------------------:|
| 1 | 2 | 7.57 | 14.47 | 1.91 | - | - |
| 1 | 4 | 8.51 | 19.35 | 2.27 | - | - |
| 8 | 2 | 44.55 | 53.40 | 1.37 | 5.26 | 3.69 |
| 8 | 4 | 50.56 | 58.33 | 1.15 | 6.67 | 4.03 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
#### Inference performance results
##### Inference performance: NVIDIA DGX A100 (1x A100 80GB)
Our results were obtained by running the `python scripts/benchmark_inference.py --batch_size {6,12,24} [--amp]` benchmarking script in the TensorFlow 2.x 21.02-py3 NGC container on NVIDIA DGX A100 (1x A100 80GB) GPU.
TF32
| Batch size | Throughput Avg [img/s] | Latency Avg | Latency 90% | Latency 95% | Latency 99% |
|:----------:|:----------------------:|:-----------:|:-----------:|:-----------:|:-----------:|
| 6 | 39.23 | 0.1530 | 0.1540 | 0.1542 | 0.1546 |
| 12 | 42.55 | 0.2654 | 0.2840 | 0.2875 | 0.2945 |
| 24 | 47.92 | 0.5007 | 0.5248 | 0.5294 | 0.5384 |
FP16
| Batch size | Throughput Avg [img/s] | Latency Avg | Latency 90% | Latency 95% | Latency 99% |
|:----------:|:----------------------:|:-----------:|:-----------:|:-----------:|:-----------:|
| 6 | 60.79 | 0.0987 | 0.0988 | 0.1000 | 0.1005 |
| 12 | 76.23 | 0.1574 | 0.1614 | 0.1621 | 0.1636 |
| 24 | 80.67 | 0.2975 | 0.3025 | 0.3035 | 0.3054 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
Our results were obtained by running the `python scripts/benchmark_inference.py --batch_size {6,12,24} [--amp]` benchmarking script in the TensorFlow 2.x 21.02-py3 NGC container on NVIDIA DGX-1 with (1x V100 16GB) GPU.
FP32
| Batch size | Throughput Avg [img/s] | Latency Avg | Latency 90% | Latency 95% | Latency 99% |
|:----------:|:----------------------:|:-----------:|:-----------:|:-----------:|:-----------:|
| 6 | 18.56 | 0.3234 | 0.3263 | 0.3269 | 0.3280 |
| 12 | 20.50 | 0.5854 | 0.5920 | 0.5933 | 0.5958 |
| 24 | OOM | - | - | - | - |
FP16
| Batch size | Throughput Avg [img/s] | Latency Avg | Latency 90% | Latency 95% | Latency 99% |
|:----------:|:----------------------:|:-----------:|:-----------:|:-----------:|:-----------:|
| 6 | 35.46 | 0.1692 | 0.1705 | 0.1707 | 0.1712 |
| 12 | 41.44 | 0.2896 | 0.2937 | 0.2945 | 0.2960 |
| 24 | 42.53 | 0.5643 | 0.5718 | 0.5733 | 0.5761 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
## Release notes
### Changelog
February 2021
- Updated implementation to TensorFlow 2, using the Keras API and distribution strategy
- ResNet50 checkpoint now is being downloaded from NVIDIA NGC
- Training with a batch size of 8 or 16 can result in unexpected hangs on DGX A100 80GB.
August 2020
- Separated implementation for TensorFlow `1.1x` and `2.x`. New implementation is TF `1.1x` version.
- Recreated runtime part of the implementation.
June 2020
- Updated accuracy tables with A100 results
- Updated training and inference performance tables with A100 results
November 2019
- Initial release
### Known issues
- Out-of-memory errors can occur when training on V100 with 8 GPUs, batch size 4, and FP32 precision.
- Errors can occur when running training with batch size 1.
- The behavior of the model can be unstable when running with TensorFlow XLA enabled.
|
TensorFlow/Detection/SSD/models/research/object_detection/dataset_tools | dataset_tools | create_oid_tf_record | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Creates TFRecords of Open Images dataset for object detection.
Example usage:
python object_detection/dataset_tools/create_oid_tf_record.py \
--input_box_annotations_csv=/path/to/input/annotations-human-bbox.csv \
--input_image_label_annotations_csv=/path/to/input/annotations-label.csv \
--input_images_directory=/path/to/input/image_pixels_directory \
--input_label_map=/path/to/input/labels_bbox_545.labelmap \
--output_tf_record_path_prefix=/path/to/output/prefix.tfrecord
CSVs with bounding box annotations and image metadata (including the image URLs)
can be downloaded from the Open Images GitHub repository:
https://github.com/openimages/dataset
This script will include every image found in the input_images_directory in the
output TFRecord, even if the image has no corresponding bounding box annotations
in the input_annotations_csv. If input_image_label_annotations_csv is specified,
it will add image-level labels as well. Note that the information of whether a
label is positively or negatively verified is NOT added to the tfrecord.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import contextlib2
import pandas as pd
import tensorflow as tf
from object_detection.dataset_tools import oid_tfrecord_creation
from object_detection.dataset_tools import tf_record_creation_util
from object_detection.utils import label_map_util
tf.flags.DEFINE_string('input_box_annotations_csv', None,
'Path to CSV containing image bounding box annotations')
tf.flags.DEFINE_string('input_images_directory', None,
'Directory containing the image pixels '
'downloaded from the OpenImages GitHub repository.')
tf.flags.DEFINE_string('input_image_label_annotations_csv', None,
'Path to CSV containing image-level labels annotations')
tf.flags.DEFINE_string('input_label_map', None, 'Path to the label map proto')
tf.flags.DEFINE_string(
'output_tf_record_path_prefix', None,
'Path to the output TFRecord. The shard index and the number of shards '
'will be appended for each output shard.')
tf.flags.DEFINE_integer('num_shards', 100, 'Number of TFRecord shards')
FLAGS = tf.flags.FLAGS
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
required_flags = [
'input_box_annotations_csv', 'input_images_directory', 'input_label_map',
'output_tf_record_path_prefix'
]
for flag_name in required_flags:
if not getattr(FLAGS, flag_name):
raise ValueError('Flag --{} is required'.format(flag_name))
label_map = label_map_util.get_label_map_dict(FLAGS.input_label_map)
all_box_annotations = pd.read_csv(FLAGS.input_box_annotations_csv)
if FLAGS.input_image_label_annotations_csv:
all_label_annotations = pd.read_csv(FLAGS.input_image_label_annotations_csv)
all_label_annotations.rename(
columns={'Confidence': 'ConfidenceImageLabel'}, inplace=True)
else:
all_label_annotations = None
all_images = tf.gfile.Glob(
os.path.join(FLAGS.input_images_directory, '*.jpg'))
all_image_ids = [os.path.splitext(os.path.basename(v))[0] for v in all_images]
all_image_ids = pd.DataFrame({'ImageID': all_image_ids})
all_annotations = pd.concat(
[all_box_annotations, all_image_ids, all_label_annotations])
tf.logging.log(tf.logging.INFO, 'Found %d images...', len(all_image_ids))
with contextlib2.ExitStack() as tf_record_close_stack:
output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
tf_record_close_stack, FLAGS.output_tf_record_path_prefix,
FLAGS.num_shards)
for counter, image_data in enumerate(all_annotations.groupby('ImageID')):
tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000,
counter)
image_id, image_annotations = image_data
# In OID image file names are formed by appending ".jpg" to the image ID.
image_path = os.path.join(FLAGS.input_images_directory, image_id + '.jpg')
with tf.gfile.Open(image_path) as image_file:
encoded_image = image_file.read()
tf_example = oid_tfrecord_creation.tf_example_from_annotations_data_frame(
image_annotations, label_map, encoded_image)
if tf_example:
shard_idx = int(image_id, 16) % FLAGS.num_shards
output_tfrecords[shard_idx].write(tf_example.SerializeToString())
if __name__ == '__main__':
tf.app.run()
|
CUDA-Optimized/FastSpeech | FastSpeech | test_sentences | You can call me directly at four two five seven zero three seven three four four or my cell four two five four four four seven four seven four or send me a meeting request with all the appropriate information.
To deliver interfaces that are significantly better suited to create and process RFC eight twenty one, RFC eight twenty two, RFC nine seventy seven, and MIME content.
was executed on a gibbet in front of his victim's house.
For a while the preacher addresses himself to the congregation at large, who listen attentively.
he put them also to the sword.
The nature of the protective assignment. |
PyTorch/Recommendation/NCF | NCF | neumf_constants | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
USER_CHANNEL_NAME = 'user_ch'
ITEM_CHANNEL_NAME = 'item_ch'
LABEL_CHANNEL_NAME = 'label_ch'
TEST_SAMPLES_PER_SERIES = 'test_samples_per_series'
|
PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/perf_analyzer | perf_analyzer | perf_config | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from .exceptions import PerfAnalyzerException
class PerfAnalyzerConfig:
"""
A config class to set arguments to the perf_analyzer.
An argument set to None will use the perf_analyzer's default.
"""
perf_analyzer_args = [
"async",
"sync",
"measurement-interval",
"measurement-mode",
"measurement-request-count",
"concurrency-range",
"request-rate-range",
"request-distribution",
"request-intervals",
"binary-search",
"num-of-sequence",
"latency-threshold",
"max-threads",
"stability-percentage",
"max-trials",
"percentile",
"input-data",
"shared-memory",
"output-shared-memory-size",
"sequence-length",
"string-length",
"string-data",
]
perf_analyzer_multiple_args = [
"shape",
]
input_to_options = [
"model-name",
"model-version",
"batch-size",
"url",
"protocol",
"latency-report-file",
"streaming",
]
input_to_verbose = ["verbose", "extra-verbose"]
def __init__(self):
"""
Construct a PerfAnalyzerConfig
"""
self._args = {k: None for k in self.perf_analyzer_args}
self._multiple_args = {k: [] for k in self.perf_analyzer_multiple_args}
self._options = {
"-m": None,
"-x": None,
"-b": None,
"-u": None,
"-i": None,
"-f": None,
"-H": None,
"-c": None,
"-t": None,
}
self._verbose = {"-v": None, "-v -v": None}
self._input_to_options = {
"model-name": "-m",
"model-version": "-x",
"batch-size": "-b",
"url": "-u",
"protocol": "-i",
"latency-report-file": "-f",
"streaming": "-H",
"concurrency": "-c",
"threads": "-t",
}
self._input_to_verbose = {"verbose": "-v", "extra-verbose": "-v -v"}
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into perf_analyzer
"""
return (
list(cls.perf_analyzer_args)
+ list(cls.perf_analyzer_multiple_args)
+ list(cls.input_to_options)
+ list(cls.input_to_verbose)
)
def update_config(self, params=None):
"""
Allows setting values from a
params dict
Parameters
----------
params: dict
keys are allowed args to perf_analyzer
"""
if params:
for key in params:
self[key] = params[key]
def to_cli_string(self):
"""
Utility function to convert a config into a
string of arguments to the perf_analyzer with CLI.
Returns
-------
str
cli command string consisting of all arguments
to the perf_analyzer set in the config, without
the executable name.
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [k for k, v in self._verbose.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
for k, v in self._multiple_args.items():
for item in v:
args.append(f"--{k}={item}")
return " ".join(args)
def __getitem__(self, key: str):
"""
Gets an arguments value in config
Parameters
----------
key : str
The name of the argument to the perf_analyzer
Returns
-------
The value that the argument is set to in this config
Raises
------
PerfAnalyzerException
If argument not found in the config
"""
if key in self._args:
return self._args[key]
elif key in self._multiple_args:
return self._multiple_args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
elif key in self._input_to_verbose:
return self._verbose[self._input_to_verbose[key]]
else:
raise PerfAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key: str, value: Any):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the perf_analyzer
value : (any)
The value to which the argument is being set
Raises
------
PerfAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._multiple_args:
self._multiple_args[key].append(value)
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
elif key in self._input_to_verbose:
self._verbose[self._input_to_verbose[key]] = value
else:
raise PerfAnalyzerException(
f"The argument '{key}' to the perf_analyzer " "is not supported by the model analyzer."
)
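if __name__ == "__main__":
    # Illustrative usage only: build a config and render the perf_analyzer
    # command-line string. The model name, batch size and shape are hypothetical.
    example = PerfAnalyzerConfig()
    example["model-name"] = "gpunet"
    example["batch-size"] = 8
    example["concurrency-range"] = "1:4"
    example["shape"] = "INPUT__0:3,224,224"
    # Expected output: -m gpunet -b 8 --concurrency-range=1:4 --shape=INPUT__0:3,224,224
    print(example.to_cli_string())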
|
PyTorch/SpeechRecognition/QuartzNet/scripts | scripts | preprocess_librispeech | #!/usr/bin/env bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
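# Note: "--speed 0.9 1.1" additionally writes speed-perturbed copies of each
# training utterance (a data augmentation step); the dev and test sets below
# are converted without perturbation.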
python ./utils/convert_librispeech.py \
--input_dir /datasets/LibriSpeech/train-clean-100 \
--dest_dir /datasets/LibriSpeech/train-clean-100-wav \
--output_json /datasets/LibriSpeech/librispeech-train-clean-100-wav.json \
--speed 0.9 1.1
python ./utils/convert_librispeech.py \
--input_dir /datasets/LibriSpeech/train-clean-360 \
--dest_dir /datasets/LibriSpeech/train-clean-360-wav \
--output_json /datasets/LibriSpeech/librispeech-train-clean-360-wav.json \
--speed 0.9 1.1
python ./utils/convert_librispeech.py \
--input_dir /datasets/LibriSpeech/train-other-500 \
--dest_dir /datasets/LibriSpeech/train-other-500-wav \
--output_json /datasets/LibriSpeech/librispeech-train-other-500-wav.json \
--speed 0.9 1.1
python ./utils/convert_librispeech.py \
--input_dir /datasets/LibriSpeech/dev-clean \
--dest_dir /datasets/LibriSpeech/dev-clean-wav \
--output_json /datasets/LibriSpeech/librispeech-dev-clean-wav.json
python ./utils/convert_librispeech.py \
--input_dir /datasets/LibriSpeech/dev-other \
--dest_dir /datasets/LibriSpeech/dev-other-wav \
--output_json /datasets/LibriSpeech/librispeech-dev-other-wav.json
python ./utils/convert_librispeech.py \
--input_dir /datasets/LibriSpeech/test-clean \
--dest_dir /datasets/LibriSpeech/test-clean-wav \
--output_json /datasets/LibriSpeech/librispeech-test-clean-wav.json
python ./utils/convert_librispeech.py \
--input_dir /datasets/LibriSpeech/test-other \
--dest_dir /datasets/LibriSpeech/test-other-wav \
--output_json /datasets/LibriSpeech/librispeech-test-other-wav.json
|
PyTorch/Forecasting/TFT/triton/deployment_toolkit | deployment_toolkit | dump | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
import pickle
import threading
from pathlib import Path
from typing import Dict, Iterator, List, Union
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
def _validate_batch(name: str, value: Union[list, np.ndarray]):
if not isinstance(value, (list, np.ndarray)):
raise ValueError(f"Values shall be lists or np.ndarrays; current type {type(value)}")
def _validate_prefix_data(prefix_data: Dict[str, List[np.ndarray]]):
batch_sizes_per_io_name = {name: [len(batch) for batch in batches] for name, batches in prefix_data.items()}
names = list(batch_sizes_per_io_name)
for io_name in names:
for batch_idx, batch_size in enumerate(batch_sizes_per_io_name[io_name]):
if not all([batch_sizes_per_io_name[other_name][batch_idx] == batch_size for other_name in names]):
non_equal_batch_sizes = {
other_name: batch_sizes_per_io_name[other_name][batch_idx] for other_name in names
}
non_equal_batch_sizes_str = ", ".join(
[f"{name}={batch_size}" for name, batch_size in non_equal_batch_sizes.items()]
)
raise ValueError(
"All inputs/outputs should have same number of batches with equal batch_size. "
f"At batch_idx={batch_idx} there are batch_sizes: {non_equal_batch_sizes_str}"
)
# ensure if each io has same number of batches with equal size
def _get_nitems_and_batches(prefix_data: Dict[str, List[np.ndarray]]):
nitems = 0
nbatches = 0
if prefix_data:
nitems_per_io_name = {name: sum(len(batch) for batch in batches) for name, batches in prefix_data.items()}
nbatches_per_io_name = {name: len(batches) for name, batches in prefix_data.items()}
nitems = list(nitems_per_io_name.values())[0]
nbatches = list(nbatches_per_io_name.values())[0]
return nitems, nbatches
class BaseDumpWriter(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, output_dir: Union[str, Path]):
self._output_dir = Path(output_dir)
# outer dict key is prefix (i.e. input/output/labels/...), inner dict key is input/output name
# list is list of batches
self._items_cache: Dict[str, Dict[str, List[np.ndarray]]] = {}
# key is prefix
self._items_counters: Dict[str, int] = {}
self._cache_lock = threading.RLock()
self._flush_threshold_b = FLUSH_THRESHOLD_B
@property
def cache_size(self):
def _get_bytes_size(name, batch):
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
batch = np.array(batch)
return batch.nbytes
with self._cache_lock:
return {
prefix: sum(_get_bytes_size(name, batch) for name, batches in data.items() for batch in batches)
for prefix, data in self._items_cache.items()
}
def _append_to_cache(self, prefix, prefix_data):
if prefix_data is None:
return
if not isinstance(prefix_data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
with self._cache_lock:
cached_prefix_data = self._items_cache.setdefault(prefix, {})
for name, batch in prefix_data.items():
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
batch = np.array(batch)
cached_batches = cached_prefix_data.setdefault(name, [])
cached_batches += [batch]
def write(self, **kwargs):
with self._cache_lock:
for prefix, prefix_data in kwargs.items():
self._append_to_cache(prefix, prefix_data)
biggest_prefix_data_size = max(self.cache_size.values())
if biggest_prefix_data_size > self._flush_threshold_b:
self.flush()
def flush(self):
with self._cache_lock:
for prefix, prefix_data in self._items_cache.items():
_validate_prefix_data(prefix_data)
output_path = self._output_dir / self._get_filename(prefix)
self._dump(prefix_data, output_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
self._items_counters[prefix] += nitems
self._items_cache = {}
def _get_filename(self, prefix):
idx = self._items_counters.setdefault(prefix, 0)
return f"{prefix}-{idx:012d}{self.FILE_SUFFIX}"
@abc.abstractmethod
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
pass
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
class PickleDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".pkl"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("wb") as pickle_file:
pickle.dump(prefix_data, pickle_file)
class JsonDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".json"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
repacked_prefix_data = self._format_data(prefix_data)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as json_file:
json.dump(repacked_prefix_data, json_file)
def _format_data(self, prefix_data: Dict[str, List[np.ndarray]]) -> Dict:
def _format_batch_for_perf_analyzer_json_format(batch: np.ndarray):
return {
"content": batch.flatten().tolist(),
"shape": list(batch.shape),
"dtype": str(batch.dtype),
}
_, nbatches = _get_nitems_and_batches(prefix_data)
batches = [{} for _ in range(nbatches)]
for io_name, batches_per_io in prefix_data.items():
for batch_idx, batch in enumerate(batches_per_io):
batches[batch_idx][io_name] = _format_batch_for_perf_analyzer_json_format(batch)
return {"data": batches}
class BaseDumpReader(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, dump_dir: Union[Path, str]):
self._dump_dir = Path(dump_dir)
def get(self, prefix: str) -> Iterator[Dict[str, np.ndarray]]:
dump_files_paths = sorted(self._dump_dir.glob(f"{prefix}*{self.FILE_SUFFIX}"))
for dump_file_path in dump_files_paths:
prefix_data = self._load_file(dump_file_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
for batch_idx in range(nbatches):
yield {io_name: prefix_data[io_name][batch_idx] for io_name in prefix_data}
@abc.abstractmethod
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
pass
def iterate_over(self, prefix_list: List[str]) -> Iterator:
iterators = [self.get(prefix) for prefix in prefix_list]
empty_iterators = [False] * len(iterators)
while not all(empty_iterators):
values = [None] * len(iterators)
for idx, iterator in enumerate(iterators):
if empty_iterators[idx]:
continue
try:
values[idx] = next(iterator)
except StopIteration:
empty_iterators[idx] = True
if all(empty_iterators):
break
if not all(empty_iterators):
yield values
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class PickleDumpReader(BaseDumpReader):
FILE_SUFFIX = ".pkl"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as pickle_file:
return pickle.load(pickle_file)
class JsonDumpReader(BaseDumpReader):
FILE_SUFFIX = ".json"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as json_file:
data = json.load(json_file)
return self._repack_data(data)
def _repack_data(self, data: Dict) -> Dict[str, List[np.ndarray]]:
result: Dict[str, List[np.ndarray]] = {}
batches = data["data"]
for batch in batches:
for io_name, batch_as_dict in batch.items():
io_batches = result.setdefault(io_name, [])
flat_array = batch_as_dict["content"]
shape = batch_as_dict["shape"]
dtype = batch_as_dict["dtype"]
batch_as_array = np.array(flat_array).reshape(shape).astype(dtype)
io_batches.append(batch_as_array)
return result
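if __name__ == "__main__":
    # Illustrative round trip only; the dump directory is a placeholder and
    # must be empty or non-existent.
    example_dir = "./dump_example"
    with JsonDumpWriter(example_dir) as writer:
        writer.write(
            inputs={"input__0": np.zeros((2, 4), dtype=np.float32)},
            outputs={"output__0": np.ones((2, 1), dtype=np.float32)},
        )
    with JsonDumpReader(example_dir) as reader:
        for batch in reader.get("inputs"):
            print({name: array.shape for name, array in batch.items()})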
|
TensorFlow/LanguageModeling/BERT | BERT | run_classifier | # coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import modeling
import optimization
import tokenization
import tensorflow as tf
import horovod.tensorflow as hvd
import time
from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags
from utils.gpu_affinity import set_affinity
import utils.dllogger_class
from dllogger import Verbosity
from utils.create_glue_data import *
import numpy as np
import tf_metrics
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"dllog_path", "/results/bert_dllog.json",
"filename where dllogger writes to")
flags.DEFINE_string(
"optimizer_type", "lamb",
"Optimizer type : adam or lamb")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_bool("use_trt", False, "Whether to use TF-TRT")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("display_loss_steps", 10,
"How often to print loss from estimator")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("num_accumulation_steps", 1,
"Number of accumulation steps before gradient update"
"Global batch size = num_accumulation_steps * train_batch_size")
flags.DEFINE_bool("amp", True, "Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.")
flags.DEFINE_bool("use_xla", True, "Whether to enable XLA JIT compilation.")
flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
def file_based_input_fn_builder(input_file, batch_size, seq_length, is_training,
drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
name_to_features = {
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.io.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn():
"""The actual input function."""
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings,
compute_type=tf.float32)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias, name='cls_logits')
probabilities = tf.nn.softmax(logits, axis=-1, name='cls_probabilities')
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1, name='cls_per_example_loss')
loss = tf.reduce_mean(per_example_loss, name='cls_loss')
return (loss, per_example_loss, logits, probabilities)
def get_frozen_tftrt_model(bert_config, shape, num_labels, use_one_hot_embeddings, init_checkpoint):
tf_config = tf.compat.v1.ConfigProto()
tf_config.gpu_options.allow_growth = True
output_node_names = ['loss/cls_loss', 'loss/cls_per_example_loss', 'loss/cls_logits', 'loss/cls_probabilities']
with tf.Session(config=tf_config) as tf_sess:
input_ids = tf.placeholder(tf.int32, shape, 'input_ids')
input_mask = tf.placeholder(tf.int32, shape, 'input_mask')
segment_ids = tf.placeholder(tf.int32, shape, 'segment_ids')
label_ids = tf.placeholder(tf.int32, (None), 'label_ids')
create_model(bert_config, False, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf_sess.run(tf.global_variables_initializer())
print("LOADED!")
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
else:
init_string = ", *NOTTTTTTTTTTTTTTTTTTTTT"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
frozen_graph = tf.graph_util.convert_variables_to_constants(tf_sess,
tf_sess.graph.as_graph_def(), output_node_names)
num_nodes = len(frozen_graph.node)
print('Converting graph using TensorFlow-TensorRT...')
from tensorflow.python.compiler.tensorrt import trt_convert as trt
converter = trt.TrtGraphConverter(
input_graph_def=frozen_graph,
nodes_blacklist=output_node_names,
max_workspace_size_bytes=(4096 << 20) - 1000,
precision_mode = "FP16" if FLAGS.amp else "FP32",
minimum_segment_size=4,
is_dynamic_op=True,
maximum_cached_engines=1000
)
frozen_graph = converter.convert()
print('Total node count before and after TF-TRT conversion:',
num_nodes, '->', len(frozen_graph.node))
print('TRT node count:',
len([1 for n in frozen_graph.node if str(n.op) == 'TRTEngineOp']))
with tf.io.gfile.GFile("frozen_modelTRT.pb", "wb") as f:
f.write(frozen_graph.SerializeToString())
return frozen_graph
def model_fn_builder(task_name, bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps,
use_one_hot_embeddings, hvd=None):
"""Returns `model_fn` closure for Estimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for Estimator."""
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
if task_name == "cola":
FN, FN_op = tf.metrics.false_negatives(labels=label_ids, predictions=predictions)
FP, FP_op = tf.metrics.false_positives(labels=label_ids, predictions=predictions)
TP, TP_op = tf.metrics.true_positives(labels=label_ids, predictions=predictions)
TN, TN_op = tf.metrics.true_negatives(labels=label_ids, predictions=predictions)
MCC = (TP * TN - FP * FN) / ((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN)) ** 0.5
MCC_op = tf.group(FN_op, TN_op, TP_op, FP_op, tf.identity(MCC, name="MCC"))
return {"MCC": (MCC, MCC_op)}
elif task_name == "mrpc":
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
f1 = tf_metrics.f1(labels=label_ids, predictions=predictions, num_classes=2, pos_indices=[1])
return {
"eval_accuracy": accuracy,
"eval_f1": f1,
"eval_loss": loss,
}
else:
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions)
loss = tf.metrics.mean(values=per_example_loss)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
tf.compat.v1.logging.info("*** Features ***")
tf.compat.v1.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
if not is_training and FLAGS.use_trt:
trt_graph = get_frozen_tftrt_model(bert_config, input_ids.shape, num_labels, use_one_hot_embeddings, init_checkpoint)
(total_loss, per_example_loss, logits, probabilities) = tf.import_graph_def(trt_graph,
input_map={'input_ids':input_ids, 'input_mask':input_mask, 'segment_ids':segment_ids, 'label_ids':label_ids},
return_elements=['loss/cls_loss:0', 'loss/cls_per_example_loss:0', 'loss/cls_logits:0', 'loss/cls_probabilities:0'],
name='')
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"probabilities": probabilities}
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=predictions)
elif mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
return output_spec
(total_loss, per_example_loss, logits, probabilities) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
if init_checkpoint and (hvd is None or hvd.rank() == 0):
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps,
hvd, False, FLAGS.amp, FLAGS.num_accumulation_steps, FLAGS.optimizer_type)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op)
elif mode == tf.estimator.ModeKeys.EVAL:
dummy_op = tf.no_op()
# Need to call mixed precision graph rewrite if fp16 to enable graph rewrite
if FLAGS.amp:
loss_scaler = tf.train.experimental.FixedLossScale(1)
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0), loss_scaler)
eval_metric_ops = metric_fn(per_example_loss, label_ids, logits)
output_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=total_loss,
eval_metric_ops=eval_metric_ops)
else:
dummy_op = tf.no_op()
# Need to call mixed precision graph rewrite if fp16 to enable graph rewrite
if FLAGS.amp:
dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite(
optimization.LAMBOptimizer(learning_rate=0.0))
output_spec = tf.estimator.EstimatorSpec(
mode=mode, predictions=probabilities)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, batch_size, seq_length, is_training, drop_remainder, hvd=None):
"""Creates an `input_fn` closure to be passed to Estimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn():
"""The actual input function."""
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
if hvd is not None: d = d.shard(hvd.size(), hvd.rank())
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder)
return d
return input_fn
def main(_):
setup_xla_flags()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
dllogging = utils.dllogger_class.dllogger_class(FLAGS.dllog_path)
if FLAGS.horovod:
hvd.init()
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
}
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
tf.io.gfile.makedirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
master_process = True
training_hooks = []
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps
hvd_rank = 0
config = tf.compat.v1.ConfigProto()
if FLAGS.horovod:
tf.compat.v1.logging.info("Multi-GPU training with TF Horovod")
tf.compat.v1.logging.info("hvd.size() = %d hvd.rank() = %d", hvd.size(), hvd.rank())
global_batch_size = FLAGS.train_batch_size * FLAGS.num_accumulation_steps * hvd.size()
master_process = (hvd.rank() == 0)
hvd_rank = hvd.rank()
config.gpu_options.visible_device_list = str(hvd.local_rank())
set_affinity(hvd.local_rank())
if hvd.size() > 1:
training_hooks.append(hvd.BroadcastGlobalVariablesHook(0))
if FLAGS.use_xla:
config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1
if FLAGS.amp:
tf.enable_resource_variables()
run_config = tf.estimator.RunConfig(
model_dir=FLAGS.output_dir if master_process else None,
session_config=config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps if master_process else None,
save_summary_steps=FLAGS.save_checkpoints_steps if master_process else None,
log_step_count_steps=FLAGS.display_loss_steps,
keep_checkpoint_max=1)
if master_process:
tf.compat.v1.logging.info("***** Configuaration *****")
for key in FLAGS.__flags.keys():
tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key)))
tf.compat.v1.logging.info("**************************")
train_examples = None
num_train_steps = None
num_warmup_steps = None
training_hooks.append(LogTrainRunHook(global_batch_size, hvd_rank, FLAGS.save_checkpoints_steps, num_steps_ignore_xla=25))
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / global_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
start_index = 0
end_index = len(train_examples)
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record")]
if FLAGS.horovod:
tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record{}".format(i)) for i in range(hvd.size())]
num_examples_per_rank = len(train_examples) // hvd.size()
remainder = len(train_examples) % hvd.size()
if hvd.rank() < remainder:
start_index = hvd.rank() * (num_examples_per_rank+1)
end_index = start_index + num_examples_per_rank + 1
else:
start_index = hvd.rank() * num_examples_per_rank + remainder
end_index = start_index + (num_examples_per_rank)
model_fn = model_fn_builder(
task_name=task_name,
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate * hvd.size(),
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_one_hot_embeddings=False,
hvd=None if not FLAGS.horovod else hvd)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config)
if FLAGS.do_train:
file_based_convert_examples_to_features(
train_examples[start_index:end_index], label_list, FLAGS.max_seq_length, tokenizer, tmp_filenames[hvd_rank])
tf.compat.v1.logging.info("***** Running training *****")
tf.compat.v1.logging.info(" Num examples = %d", len(train_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.compat.v1.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=tmp_filenames,
batch_size=FLAGS.train_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True,
hvd=None if not FLAGS.horovod else hvd)
train_start_time = time.time()
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=training_hooks)
train_time_elapsed = time.time() - train_start_time
train_time_wo_overhead = training_hooks[-1].total_time
avg_sentences_per_second = num_train_steps * global_batch_size * 1.0 / train_time_elapsed
ss_sentences_per_second = (training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size * 1.0 / train_time_wo_overhead
if master_process:
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Training Time = %0.2f for Sentences = %d", train_time_elapsed,
num_train_steps * global_batch_size)
tf.compat.v1.logging.info("Total Training Time W/O Overhead = %0.2f for Sentences = %d", train_time_wo_overhead,
(training_hooks[-1].count - training_hooks[-1].skipped) * global_batch_size)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) with overhead = %0.2f", avg_sentences_per_second)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
tf.compat.v1.logging.info("-----------------------------")
if FLAGS.do_eval and master_process:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.compat.v1.logging.info("***** Running evaluation *****")
tf.compat.v1.logging.info(" Num examples = %d", len(eval_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_drop_remainder = False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
batch_size=FLAGS.eval_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
eval_hooks = [LogEvalRunHook(FLAGS.eval_batch_size)]
eval_start_time = time.time()
result = estimator.evaluate(input_fn=eval_input_fn, hooks=eval_hooks)
eval_time_elapsed = time.time() - eval_start_time
time_list = eval_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
eval_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.eval_batch_size
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / eval_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", eval_time_elapsed,
eval_hooks[-1].count * FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", eval_time_wo_overhead,
num_sentences)
tf.compat.v1.logging.info("Summary Inference Statistics on EVAL set")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.eval_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info("-----------------------------")
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.io.gfile.GFile(output_eval_file, "w") as writer:
tf.compat.v1.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
dllogging.logger.log(step=(), data={key: float(result[key])}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict and master_process:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.compat.v1.logging.info("***** Running prediction*****")
tf.compat.v1.logging.info(" Num examples = %d", len(predict_examples))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
batch_size=FLAGS.predict_batch_size,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
predict_hooks = [LogEvalRunHook(FLAGS.predict_batch_size)]
predict_start_time = time.time()
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
with tf.io.gfile.GFile(output_predict_file, "w") as writer:
tf.compat.v1.logging.info("***** Predict results *****")
for prediction in estimator.predict(input_fn=predict_input_fn, hooks=predict_hooks,
yield_single_examples=False):
output_line = "\t".join(
str(class_probability) for class_probability in prediction) + "\n"
writer.write(output_line)
predict_time_elapsed = time.time() - predict_start_time
time_list = predict_hooks[-1].time_list
time_list.sort()
# Removing outliers (init/warmup) in throughput computation.
predict_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.8)])
num_sentences = (int(len(time_list) * 0.8)) * FLAGS.predict_batch_size
avg = np.mean(time_list)
cf_50 = max(time_list[:int(len(time_list) * 0.50)])
cf_90 = max(time_list[:int(len(time_list) * 0.90)])
cf_95 = max(time_list[:int(len(time_list) * 0.95)])
cf_99 = max(time_list[:int(len(time_list) * 0.99)])
cf_100 = max(time_list[:int(len(time_list) * 1)])
ss_sentences_per_second = num_sentences * 1.0 / predict_time_wo_overhead
tf.compat.v1.logging.info("-----------------------------")
tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", predict_time_elapsed,
predict_hooks[-1].count * FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", predict_time_wo_overhead,
num_sentences)
tf.compat.v1.logging.info("Summary Inference Statistics on TEST SET")
tf.compat.v1.logging.info("Batch size = %d", FLAGS.predict_batch_size)
tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length)
tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32")
tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000)
tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000)
tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000)
tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second)
dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT)
tf.compat.v1.logging.info("-----------------------------")
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.compat.v1.app.run()
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | decoderInstancePlugins | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_DECODERINSTANCEPLUGINS_H
#define TT2I_DECODERINSTANCEPLUGINS_H
#include "binding.h"
#include "cudaMemory.h"
#include "decoderInstance.h"
#include "NvInfer.h"
#include <cuda_runtime.h>
#include <memory>
#include <string>
namespace tts
{
class DecoderInstancePlugins : public DecoderInstance
{
public:
static constexpr const char* const ENGINE_NAME = "tacotron2_decoder_plugins";
/**
* @brief Create a new DecoderInstancePlugins.
*
* @param engine The ICudaEngine containing the decoder network.
* @param maxChunkSize The maximum sized chunk the decoder will process.
*/
DecoderInstancePlugins(
TRTPtr<nvinfer1::ICudaEngine> engine, int maxChunkSize);
/**
* @brief Reset the decoder for new input.
*
* @param stream The stream to run on.
*/
void reset(cudaStream_t stream) override;
protected:
/**
* @brief Decode a single frame of output.
*
* @param stream The stream to operate on.
* @param context The execution context.
* @param batchSize The size of the batch to process.
* @param inputLastFrameDevice The last frame of output produced (all 0s
* for first frame).
* @param inputMemoryDevice The "Memory" tensor on the device.
* @param inputProcessedMemoryDevice The "Processed Memory" tensor on the
* device.
* @param inputMaskDevice The input mask on the device (1 for i < input
* length, 0 for i >= input length).
* @param inputLengthHost The length of each input item on the host.
* @param inputLengthDevice The length of each input on the device.
     * @param inputDropoutDevice The dropout vector to use on the device.
* @param outputFrameDevice The output frame on the device.
*/
    void decode(cudaStream_t stream, nvinfer1::IExecutionContext& context, int batchSize,
const float* inputLastFrameDevice, const float* inputMemoryDevice, const float* inputProcessedMemoryDevice,
const float* inputMaskDevice, const int32_t* inputLengthHost, const int32_t* inputLengthDevice,
const float* inputDropoutDevice, float* outputFrameDevice) override;
private:
int mNumEncodingDim;
int mNumAttentionDim;
bool mDimsSet;
Binding mBinding;
CudaMemory<float> mInputWeightsDevice;
CudaMemory<float> mOutputWeightsDevice;
CudaMemory<float> mInAttentionHiddenStatesDevice;
CudaMemory<float> mInAttentionCellStatesDevice;
CudaMemory<float> mOutAttentionHiddenStatesDevice;
CudaMemory<float> mOutAttentionCellStatesDevice;
CudaMemory<float> mInputAttentionContextDevice;
CudaMemory<float> mOutputAttentionContextDevice;
CudaMemory<float> mInDecoderHiddenStatesDevice;
CudaMemory<float> mInDecoderCellStatesDevice;
CudaMemory<float> mOutDecoderHiddenStatesDevice;
CudaMemory<float> mOutDecoderCellStatesDevice;
};
} // namespace tts
#endif
|
CUDA-Optimized/FastSpeech/fastspeech | fastspeech | data_load | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import torch
from torch.utils.data import DataLoader
class PadDataLoader(DataLoader):
@staticmethod
def pad_collate_fn(batch):
"""
        Collate a list of per-sample dicts into a single batch dict, zero-padding arrays of unequal shape.
"""
# TODO refactor
result = dict()
for key in batch[0].keys():
# apply padding on dataset
sub_batch = [elem[key] for elem in batch]
# check diff dims
if not isinstance(sub_batch[0], np.ndarray):
# if list of float or int
assert all([type(x) == type(sub_batch[0]) for x in sub_batch[1:]]), sub_batch
if isinstance(sub_batch[0], int):
sub_batch = torch.LongTensor(sub_batch)
elif isinstance(sub_batch[0], float):
sub_batch = torch.DoubleTensor(sub_batch)
elif any(list(map(lambda x: x.shape != sub_batch[0].shape, sub_batch[1:]))):
sub_batch = torch.from_numpy(__class__.pad_zero(sub_batch))
else:
sub_batch = torch.from_numpy(np.concatenate(np.expand_dims(sub_batch, axis=0)))
result[key] = sub_batch
return result
def __init__(self, dataset, batch_size, num_workers, shuffle=True, pin_memory=True, drop_last=True):
super().__init__(dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=pin_memory,
collate_fn=self.pad_collate_fn,
drop_last=drop_last
)
@staticmethod
def pad_zero(sub_batch):
        """Zero-pad each array in sub_batch to the element-wise maximum shape and stack into one array."""
        dims = [b.shape for b in sub_batch]
max_dims = list(dims[0])
for d_li in dims[1:]:
for d_idx in range(len(d_li)):
if max_dims[d_idx] < d_li[d_idx]:
max_dims[d_idx] = d_li[d_idx]
temp = np.zeros((len(sub_batch), *max_dims), dtype=sub_batch[0].dtype)
for i, b in enumerate(sub_batch):
if len(b.shape) == 1:
temp[i, :b.shape[0]] = b
elif len(b.shape) == 2:
temp[i, :b.shape[0], :b.shape[1]] = b
elif len(b.shape) == 3:
temp[i, :b.shape[0], :b.shape[1], :b.shape[2]] = b
else:
raise ValueError
return temp
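# Usage (illustrative): loader = PadDataLoader(dataset, batch_size=16, num_workers=4)
# Each yielded batch is a dict of tensors, zero-padded to the largest shape in that batch.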
|
PyTorch/LanguageModeling/BERT/lamb_amp_opt | lamb_amp_opt | setup | from setuptools import find_packages
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='fused_lamb',
description="Fused LAMB Optimizer for PyTorch native AMP training",
packages=find_packages(exclude=('test',)), # NOQA
ext_modules=[
CUDAExtension(
name='fused_lamb_CUDA',
sources=[
'csrc/frontend.cpp',
'csrc/multi_tensor_l2norm_kernel.cu',
'csrc/multi_tensor_lamb.cu',
],
extra_compile_args={
'nvcc': [
'-lineinfo',
'-O3',
'--use_fast_math',
],
}
),
],
cmdclass={
'build_ext': BuildExtension.with_options(use_ninja=True),
},
)
|
TensorFlow/Detection/SSD/models/research/object_detection/box_coders | box_coders | square_box_coder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Square box coder.
Square box coder follows the coding schema described below:
l = sqrt(h * w)
la = sqrt(ha * wa)
ty = (y - ya) / la
tx = (x - xa) / la
tl = log(l / la)
where x, y, w, h denote the box's center coordinates, width, and height,
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tl denote the anchor-encoded
center, and length, respectively. Because the encoded box is a square, only
one length is encoded.
This has shown to provide performance improvements over the Faster RCNN box
coder when the objects being detected tend to be square (e.g. faces) and when
the input images are not distorted via resizing.
"""
import tensorflow as tf
from object_detection.core import box_coder
from object_detection.core import box_list
EPSILON = 1e-8
class SquareBoxCoder(box_coder.BoxCoder):
"""Encodes a 3-scalar representation of a square box."""
def __init__(self, scale_factors=None):
"""Constructor for SquareBoxCoder.
Args:
scale_factors: List of 3 positive scalars to scale ty, tx, and tl.
If set to None, does not perform scaling. For faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0].
Raises:
ValueError: If scale_factors is not length 3 or contains values less than
or equal to 0.
"""
if scale_factors:
if len(scale_factors) != 3:
raise ValueError('The argument scale_factors must be a list of length '
'3.')
if any(scalar <= 0 for scalar in scale_factors):
raise ValueError('The values in scale_factors must all be greater '
'than 0.')
self._scale_factors = scale_factors
@property
def code_size(self):
return 3
def _encode(self, boxes, anchors):
"""Encodes a box collection with respect to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, tl].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
la = tf.sqrt(ha * wa)
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
l = tf.sqrt(h * w)
# Avoid NaN in division and log below.
la += EPSILON
l += EPSILON
tx = (xcenter - xcenter_a) / la
ty = (ycenter - ycenter_a) / la
tl = tf.log(l / la)
# Scales location targets for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
tl *= self._scale_factors[2]
return tf.transpose(tf.stack([ty, tx, tl]))
def _decode(self, rel_codes, anchors):
"""Decodes relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
la = tf.sqrt(ha * wa)
ty, tx, tl = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
tl /= self._scale_factors[2]
l = tf.exp(tl) * la
ycenter = ty * la + ycenter_a
xcenter = tx * la + xcenter_a
ymin = ycenter - l / 2.
xmin = xcenter - l / 2.
ymax = ycenter + l / 2.
xmax = xcenter + l / 2.
return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
|
DGLPyTorch/DrugDiscovery/SE3Transformer | SE3Transformer | requirements | e3nn==0.3.3
wandb==0.12.0
pynvml==11.0.0
git+https://github.com/NVIDIA/dllogger#egg=dllogger
|
PyTorch/Translation/Transformer/fairseq/optim/lr_scheduler | lr_scheduler | inverse_square_root_schedule | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('inverse_sqrt')
class InverseSquareRootSchedule(FairseqLRScheduler):
"""Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (`--warmup-init-lr`) until the configured
learning rate (`--lr`). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup:
lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
lr = lrs[update_num]
After warmup:
lr = decay_factor / sqrt(update_num)
where
decay_factor = args.lr * sqrt(args.warmup_updates)
"""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if len(args.lr) > 1:
raise ValueError(
'Cannot use a fixed learning rate schedule with inverse_sqrt.'
' Consider --lr-scheduler=fixed instead.'
)
warmup_end_lr = args.lr[0]
if args.warmup_init_lr < 0:
args.warmup_init_lr = warmup_end_lr
# linearly warmup for the first args.warmup_updates
self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
# then, decay prop. to the inverse square root of the update number
self.decay_factor = warmup_end_lr * args.warmup_updates**0.5
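        # e.g. (illustrative): with lr = 5e-4 and 4000 warmup updates,
        # decay_factor = 5e-4 * 4000**0.5 ~= 0.0316, so the lr at update 16000
        # is about 0.0316 / sqrt(16000) ~= 2.5e-4.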
# initial learning rate
self.lr = args.warmup_init_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
help='initial learning rate during warmup phase; default is args.lr')
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates < self.args.warmup_updates:
self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
else:
self.lr = self.decay_factor * num_updates**-0.5
self.optimizer.set_lr(self.lr)
return self.lr
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/python/ops | ops | dot_based_interact_ops | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
dot_based_interact_ops = load_library.load_op_library(
resource_loader.get_path_to_datafile('_dot_based_interact_ops.so'))
dot_based_interact = dot_based_interact_ops.dot_based_interact
@ops.RegisterGradient("DotBasedInteract")
def dot_based_interact_grad(op, grad):
input = op.inputs[0]
return dot_based_interact_ops.dot_based_interact_grad(input, grad)
|
TensorFlow2/LanguageModeling/BERT/official/utils/misc | misc | keras_utils | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for the Keras implementations of models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import time
from absl import logging
import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import tf2
from tensorflow.python.eager import profiler
class BatchTimestamp(object):
"""A structure to store batch time stamp."""
def __init__(self, batch_index, timestamp):
self.batch_index = batch_index
self.timestamp = timestamp
def __repr__(self):
return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format(
self.batch_index, self.timestamp)
class TimeHistory(tf.keras.callbacks.Callback):
"""Callback for Keras models."""
def __init__(self, batch_size, log_steps):
"""Callback for logging performance.
Args:
batch_size: Total batch size.
log_steps: Interval of steps between logging of batch level stats.
"""
self.batch_size = batch_size
super(TimeHistory, self).__init__()
self.log_steps = log_steps
self.global_steps = 0
# Logs start of step 1 then end of each step based on log_steps interval.
self.timestamp_log = []
# Records the time each epoch takes to run from start to finish of epoch.
self.epoch_runtime_log = []
def on_train_end(self, logs=None):
self.train_finish_time = time.time()
def on_epoch_begin(self, epoch, logs=None):
self.epoch_start = time.time()
def on_batch_begin(self, batch, logs=None):
self.global_steps += 1
if self.global_steps == 1:
self.start_time = time.time()
self.timestamp_log.append(BatchTimestamp(self.global_steps,
self.start_time))
def on_batch_end(self, batch, logs=None):
"""Records elapse time of the batch and calculates examples per second."""
if self.global_steps % self.log_steps == 0:
timestamp = time.time()
elapsed_time = timestamp - self.start_time
examples_per_second = (self.batch_size * self.log_steps) / elapsed_time
self.timestamp_log.append(BatchTimestamp(self.global_steps, timestamp))
logging.info(
"BenchmarkMetric: {'global step':%d, 'time_taken': %f,"
"'examples_per_second': %f}",
self.global_steps, elapsed_time, examples_per_second)
self.start_time = timestamp
def on_epoch_end(self, epoch, logs=None):
epoch_run_time = time.time() - self.epoch_start
self.epoch_runtime_log.append(epoch_run_time)
logging.info(
"BenchmarkMetric: {'epoch':%d, 'time_taken': %f}",
epoch, epoch_run_time)
def get_profiler_callback(model_dir, profile_steps, enable_tensorboard,
steps_per_epoch):
"""Validate profile_steps flag value and return profiler callback."""
profile_steps_error_message = (
'profile_steps must be a comma separated pair of positive integers, '
'specifying the first and last steps to be profiled.'
)
try:
profile_steps = [int(i) for i in profile_steps.split(',')]
except ValueError:
raise ValueError(profile_steps_error_message)
if len(profile_steps) != 2:
raise ValueError(profile_steps_error_message)
start_step, stop_step = profile_steps
if start_step < 0 or start_step > stop_step:
raise ValueError(profile_steps_error_message)
if enable_tensorboard:
logging.warning(
'Both TensorBoard and profiler callbacks are used. Note that the '
'TensorBoard callback profiles the 2nd step (unless otherwise '
'specified). Please make sure the steps profiled by the two callbacks '
'do not overlap.')
return ProfilerCallback(model_dir, start_step, stop_step, steps_per_epoch)
class ProfilerCallback(tf.keras.callbacks.Callback):
"""Save profiles in specified step range to log directory."""
def __init__(self, log_dir, start_step, stop_step, steps_per_epoch):
super(ProfilerCallback, self).__init__()
self.log_dir = log_dir
self.start_step = start_step
self.stop_step = stop_step
self.start_epoch = start_step // steps_per_epoch
self.stop_epoch = stop_step // steps_per_epoch
self.start_step_in_epoch = start_step % steps_per_epoch
self.stop_step_in_epoch = stop_step % steps_per_epoch
self.should_start = False
self.should_stop = False
def on_epoch_begin(self, epoch, logs=None):
if epoch == self.start_epoch:
self.should_start = True
if epoch == self.stop_epoch:
self.should_stop = True
def on_batch_begin(self, batch, logs=None):
if batch == self.start_step_in_epoch and self.should_start:
self.should_start = False
profiler.start()
logging.info('Profiler started at Step %s', self.start_step)
def on_batch_end(self, batch, logs=None):
if batch == self.stop_step_in_epoch and self.should_stop:
self.should_stop = False
results = profiler.stop()
profiler.save(self.log_dir, results)
logging.info(
'Profiler saved profiles for steps between %s and %s to %s',
self.start_step, self.stop_step, self.log_dir)
def set_session_config(enable_eager=False,
enable_xla=False):
"""Sets the session config."""
if is_v2_0():
set_config_v2(enable_xla=enable_xla)
else:
config = get_config_proto_v1(enable_xla=enable_xla)
if enable_eager:
tf.compat.v1.enable_eager_execution(config=config)
else:
sess = tf.Session(config=config)
tf.keras.backend.set_session(sess)
def get_config_proto_v1(enable_xla=False):
"""Return config proto according to flag settings, or None to use default."""
config = None
if enable_xla:
config = tf.compat.v1.ConfigProto()
config.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_2)
return config
def set_config_v2(enable_xla=False):
"""Config eager context according to flag values using TF 2.0 API."""
if enable_xla:
tf.config.optimizer.set_jit(True)
def is_v2_0():
"""Returns true if using tf 2.0."""
return tf2.enabled()
def set_gpu_thread_mode_and_count(gpu_thread_mode,
datasets_num_private_threads,
num_gpus, per_gpu_thread_count):
"""Set GPU thread mode and count, and adjust dataset threads count."""
cpu_count = multiprocessing.cpu_count()
logging.info('Logical CPU cores: %s', cpu_count)
# Allocate private thread pool for each GPU to schedule and launch kernels
per_gpu_thread_count = per_gpu_thread_count or 2
os.environ['TF_GPU_THREAD_MODE'] = gpu_thread_mode
os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
logging.info('TF_GPU_THREAD_COUNT: %s',
os.environ['TF_GPU_THREAD_COUNT'])
logging.info('TF_GPU_THREAD_MODE: %s',
os.environ['TF_GPU_THREAD_MODE'])
# Limit data preprocessing threadpool to CPU cores minus number of total GPU
# private threads and memory copy threads.
total_gpu_thread_count = per_gpu_thread_count * num_gpus
num_runtime_threads = num_gpus
if not datasets_num_private_threads:
datasets_num_private_threads = min(
cpu_count - total_gpu_thread_count - num_runtime_threads,
num_gpus * 8)
logging.info('Set datasets_num_private_threads to %s',
datasets_num_private_threads)
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2LSTMCellPlugin | taco2LSTMCellPlugin | taco2LSTMCellLayerPluginCreator | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_LSTMCELLLAYERPLUGINCREATOR_H
#define TT2I_LSTMCELLLAYERPLUGINCREATOR_H
#include "NvInfer.h"
#include <string>
#ifdef DEVEL
// The destructor of nvinfer1::IPluginCreator is non-virtual and public, so
// we need to suppress the warning.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
#endif
namespace nvinfer1
{
namespace plugin
{
class Taco2LSTMCellLayerPluginCreator : public nvinfer1::IPluginCreator
{
public:
/**
* @brief Get the collection of fields for this plugin, with their names only.
*
* @return The collection of fields.
*/
static nvinfer1::PluginFieldCollection* getFields();
/**
* @brief Create a new Taco2LSTMCellLayerPluginCreator.
*/
Taco2LSTMCellLayerPluginCreator();
/**
* @brief Get the name of the plugin.
*
* @return The name of the plugin.
*/
const char* getPluginName() const override;
/**
* @brief Get the plugin version.
*
* @return The plugin version.
*/
const char* getPluginVersion() const override;
/**
* @brief Get the collection of fields for this plugin.
*
* @return The collection of fields.
*/
const nvinfer1::PluginFieldCollection* getFieldNames() override;
/**
* @brief Create a new Taco2LSTMCellLayerPlugin.
*
* @param name The name (unused currently).
* @param fc The collection of fields to initialize with.
*
* @return The created plugin.
*/
nvinfer1::IPluginV2* createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) override;
/**
* @brief Create a custom layer by name from a data stream.
*
* @param layerName The name of the layer.
* @param serialData The serialized data for the layer.
* @param serialLength The length of the serialized data.
*
* @return The plugin. Clients must destroy the plugin once all consumers of
* it have been destroyed.
*/
nvinfer1::IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override;
/**
* @brief Set the namespace for created plugins.
*
* @param pluginNamespace The namespace.
*/
void setPluginNamespace(const char* pluginNamespace) override;
/**
* @brief Get the namespace for created plugins.
*
* @return The namespace.
*/
const char* getPluginNamespace() const override;
private:
std::string mNamespace;
};
} // namespace plugin
} // namespace nvinfer1
#ifdef DEVEL
#pragma GCC diagnostic pop
#endif
#endif
|
PyTorch/SpeechSynthesis/FastPitch/triton | triton | metrics | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, NamedTuple, Optional
import numpy as np
from deployment_toolkit.core import BaseMetricsCalculator
class MetricsCalculator(BaseMetricsCalculator):
def __init__(self, output_used_for_metrics: str):
self._output_used_for_metrics = output_used_for_metrics
self._MEL_MIN = -15.0
self._MEL_MAX = 3.0
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
y_pred = y_pred[self._output_used_for_metrics]
value_range_correct = np.ones(y_pred.shape[0]).astype(np.int32)
for idx, mel in enumerate(y_pred):
mel = mel[~np.isnan(mel)]
if mel.min() < self._MEL_MIN or mel.max() > self._MEL_MAX:
value_range_correct[idx] = 0
return {
"accuracy": np.mean(value_range_correct)
}
# from LJSpeech:
# min(mins) # Out[27]: -11.512925148010254
# max(maxs) # Out[28]: 2.0584452152252197
# min(sizes) # Out[29]: 96
# max(sizes) # Out[30]: 870
|
TensorFlow/Detection/SSD/models | models | ISSUE_TEMPLATE | Please go to Stack Overflow for help and support:
http://stackoverflow.com/questions/tagged/tensorflow
Also, please understand that many of the models included in this repository are experimental and research-style code. If you open a GitHub issue, here is our policy:
1. It must be a bug, a feature request, or a significant problem with documentation (for small docs fixes please send a PR instead).
2. The form below must be filled out.
**Here's why we have that policy**: TensorFlow developers respond to issues. We want to focus on work that benefits the whole community, e.g., fixing bugs and adding features. Support only helps individuals. GitHub also notifies thousands of people when issues are filed. We want them to see you communicating an interesting problem, rather than being redirected to Stack Overflow.
------------------------
### System information
- **What is the top-level directory of the model you are using**:
- **Have I written custom code (as opposed to using a stock example script provided in TensorFlow)**:
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
- **TensorFlow installed from (source or binary)**:
- **TensorFlow version (use command below)**:
- **Bazel version (if compiling from source)**:
- **CUDA/cuDNN version**:
- **GPU model and memory**:
- **Exact command to reproduce**:
You can collect some of this information using our environment capture script:
https://github.com/tensorflow/tensorflow/tree/master/tools/tf_env_collect.sh
You can obtain the TensorFlow version with
python -c "import tensorflow as tf; print(tf.GIT_VERSION, tf.VERSION)"
### Describe the problem
Describe the problem clearly here. Be sure to convey here why it's a bug in TensorFlow or a feature request.
### Source code / logs
Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem.
|
PyTorch/Detection/Efficientdet/scripts/D0 | D0 | validation_AMP_V100-32G | #!/bin/bash
rm -rf *.json
python -u -m bind_launch --nproc_per_node=${NUM_PROC:-1} validate.py '/workspace/object_detection/datasets/coco/' --model efficientdet_d0 -b ${BATCH_SIZE:-8} --torchscript --use-ema --amp --checkpoint ${CKPT_PATH:-/checkpoints/Effdet_B0.pth} |
PyTorch/Recommendation/DLRM/dlrm/cuda_ext | cuda_ext | dot_based_interact | # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.autograd import Function
if torch.cuda.get_device_capability()[0] >= 8:
from dlrm.cuda_ext import interaction_ampere as interaction
else:
from dlrm.cuda_ext import interaction_volta as interaction
class DotBasedInteract(Function):
""" Forward and Backward paths of cuda extension for dot-based feature interact."""
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.half)
def forward(ctx, input, bottom_mlp_output):
output = interaction.dotBasedInteractFwd(input, bottom_mlp_output)
ctx.save_for_backward(input)
return output
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad_output):
input, = ctx.saved_tensors
grad, mlp_grad = interaction.dotBasedInteractBwd(input, grad_output)
return grad, mlp_grad
dotBasedInteract = DotBasedInteract.apply
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | requirements | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
tqdm>=4.44.1
docker==5.0.0
colorama==0.4.4
pytz==2021.1
coloredlogs==15.0.1
py-cpuinfo==8.0.0
psutil==5.8.0
retrying>=1.3.3 |
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph | graph | random_bipartite | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Optional, Set, Tuple
from syngen.generator.graph.fitter import RMATFitter
from syngen.generator.graph.rmat_bipartite import RMATBipartiteGenerator
class RandomBipartite(RMATBipartiteGenerator):
""" Graph generator based on erdos-renyi model that generate random bipartite graphs
Args:
seed (int):
Seed to reproduce the results. If None then random seed will be used.
logdir (str):
Directory to store the logging results.
Defaults to ./logs.
fitter (RMATFitter):
RMATFitter to be used.
"""
def __init__(self, seed: Optional[int] = None, logdir: str = "./logs", gpu: bool = True, **kwargs,):
super().__init__(seed, logdir, gpu, fitter=RMATFitter(random=True))
self.fit()
def fit(
self,
graph: Optional[List[Tuple[int, int]]] = None,
src_set: Optional[Set[int]] = None,
dst_set: Optional[Set[int]] = None,
is_directed: bool = False,
transform_graph: bool = True,
):
""" Fits generator on the graph. For random graph it is graph independent."""
self._fit_src_dst_results = self.fitter.fit(graph)
self._fit_dst_src_results = (
None if not is_directed else self.fitter.fit(graph)
)
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | mask_rcnn_resnet101_atrous_coco | # Mask R-CNN with Resnet-101 (v1), Atrous version
# Configured for MSCOCO Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
model {
faster_rcnn {
num_classes: 90
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 800
max_dimension: 1365
}
}
number_of_stages: 3
feature_extractor {
type: 'faster_rcnn_resnet101'
first_stage_features_stride: 8
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 8
width_stride: 8
}
}
first_stage_atrous_rate: 2
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
predict_instance_masks: true
mask_height: 33
mask_width: 33
mask_prediction_conv_depth: 0
mask_prediction_num_conv_layers: 4
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
second_stage_mask_prediction_loss_weight: 4.0
}
}
train_config: {
batch_size: 1
optimizer {
momentum_optimizer: {
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 0.0003
schedule {
step: 900000
learning_rate: .00003
}
schedule {
step: 1200000
learning_rate: .000003
}
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
from_detection_checkpoint: true
# Note: The below line limits the training process to 200K steps, which we
  # empirically found to be sufficient to train the pets dataset. This
# effectively bypasses the learning rate schedule (the learning rate will
# never decay). Remove the below line to train indefinitely.
num_steps: 200000
data_augmentation_options {
random_horizontal_flip {
}
}
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
load_instance_masks: true
mask_type: PNG_MASKS
}
eval_config: {
num_examples: 8000
# Note: The below line limits the evaluation process to 10 evaluations.
# Remove the below line to evaluate indefinitely.
max_evals: 10
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"
load_instance_masks: true
mask_type: PNG_MASKS
shuffle: false
num_readers: 1
}
|
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers | layers | on_device_embedding_test | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras-based one-hot embedding layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import
from official.nlp.modeling.layers import on_device_embedding
# This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It
# guarantees forward compatibility of this code for the V2 switchover.
@keras_parameterized.run_all_keras_modes
class OnDeviceEmbeddingTest(keras_parameterized.TestCase):
def test_layer_creation(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size, embedding_width=embedding_width)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# The output should be the same as the input, save that it has an extra
# embedding_width dimension on the end.
expected_output_shape = [None, sequence_length, embedding_width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
self.assertEqual(output_tensor.dtype, tf.float32)
def test_layer_creation_with_float16_dtype(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size, embedding_width=embedding_width, dtype="float16")
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# The output should be the same as the input, save that it has an extra
# embedding_width dimension on the end.
expected_output_shape = [None, sequence_length, embedding_width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
self.assertEqual(output_tensor.dtype, tf.float16)
def test_layer_invocation(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size, embedding_width=embedding_width)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# Create a model from the test layer.
model = tf.keras.Model(input_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 3
input_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
output = model.predict(input_data)
self.assertEqual(tf.float32, output.dtype)
def test_layer_invocation_with_float16_dtype(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size, embedding_width=embedding_width, dtype="float16")
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# Create a model from the test layer.
model = tf.keras.Model(input_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 3
input_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
output = model.predict(input_data)
self.assertEqual(tf.float16, output.dtype)
def test_one_hot_layer_creation(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
use_one_hot=True)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# The output should be the same as the input, save that it has an extra
# embedding_width dimension on the end.
expected_output_shape = [None, sequence_length, embedding_width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
self.assertEqual(output_tensor.dtype, tf.float32)
def test_one_hot_layer_creation_with_float16_dtype(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
dtype="float16",
use_one_hot=True)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# The output should be the same as the input, save that it has an extra
# embedding_width dimension on the end.
expected_output_shape = [None, sequence_length, embedding_width]
self.assertEqual(expected_output_shape, output_tensor.shape.as_list())
self.assertEqual(output_tensor.dtype, tf.float16)
def test_one_hot_layer_invocation(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
use_one_hot=True)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# Create a model from the test layer.
model = tf.keras.Model(input_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 3
input_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
output = model.predict(input_data)
self.assertEqual(tf.float32, output.dtype)
def test_one_hot_layer_invocation_with_float16_dtype(self):
vocab_size = 31
embedding_width = 27
test_layer = on_device_embedding.OnDeviceEmbedding(
vocab_size=vocab_size,
embedding_width=embedding_width,
dtype="float16",
use_one_hot=True)
# Create a 2-dimensional input (the first dimension is implicit).
sequence_length = 23
input_tensor = tf.keras.Input(shape=(sequence_length), dtype=tf.int32)
output_tensor = test_layer(input_tensor)
# Create a model from the test layer.
model = tf.keras.Model(input_tensor, output_tensor)
# Invoke the model on test data. We can't validate the output data itself
# (the NN is too complex) but this will rule out structural runtime errors.
batch_size = 3
input_data = np.random.randint(
vocab_size, size=(batch_size, sequence_length))
output = model.predict(input_data)
self.assertEqual(tf.float16, output.dtype)
if __name__ == "__main__":
tf.test.main()
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/deployment/convert | convert | onnx | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
config:
type: onnx
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/utils | utils | __init__ | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel (tgrel@nvidia.com)
|
PyTorch/Classification/ConvNets/triton/resnext101-32x4d | resnext101-32x4d | README | # Deploying the ResNeXt101-32x4d model using Triton Inference Server
The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server) provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or gRPC endpoint, allowing remote clients to request inferencing for any number of GPU or CPU models being managed by the server.
This folder contains instructions on how to deploy and run inference on
Triton Inference Server as well as gather detailed performance analysis.
## Table Of Contents
* [Model overview](#model-overview)
* [Setup](#setup)
* [Inference container](#inference-container)
* [Deploying the model](#deploying-the-model)
* [Running the Triton Inference Server](#running-the-triton-inference-server)
* [Quick Start Guide](#quick-start-guide)
* [Running the client](#running-the-client)
* [Gathering performance data](#gathering-performance-data)
* [Advanced](#advanced)
* [Automated benchmark script](#automated-benchmark-script)
* [Performance](#performance)
* [Dynamic batching performance](#dynamic-batching-performance)
* [TensorRT backend inference performance (1x V100 16GB)](#tensorrt-backend-inference-performance-1x-v100-16gb)
* [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
The ResNeXt101-32x4d is a model introduced in the [Aggregated Residual Transformations for Deep Neural Networks](https://arxiv.org/pdf/1611.05431.pdf) paper.
It is based on the regular ResNet model, replacing the 3x3 convolutions inside the bottleneck block with 3x3 grouped convolutions.
The ResNeXt101-32x4d model can be deployed for inference on the [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server) using
TorchScript, ONNX Runtime or TensorRT as an execution backend.
## Setup
This script requires a trained ResNeXt101-32x4d model checkpoint that can be used for deployment.
### Inference container
For easy-to-use deployment, a build script for a special inference container is provided. To build that container, go to the main repository folder and run:
`docker build -t rnxt_inference . -f triton/Dockerfile`
This command will download the dependencies and build the inference container. Then, run a shell inside the container:
`docker run -it --rm --gpus device=0 --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 --net=host -v <PATH_TO_MODEL_REPOSITORY>:/repository rnxt_inference bash`
Here `device=0,1,2,3` selects the GPUs indexed by ordinals `0,1,2` and `3`, respectively. The server will see only these GPUs. If you write `device=all`, then the server will see all the available GPUs. `PATH_TO_MODEL_REPOSITORY` indicates the location where the
deployed models are stored.
### Deploying the model
To deploy the ResNeXt101-32x4d model on the Triton Inference Server, run the `deployer.py` script from inside the deployment Docker container to convert the checkpoint into a format compatible with the chosen backend.
```
usage: deployer.py [-h] (--ts-script | --ts-trace | --onnx | --trt)
[--triton-no-cuda] [--triton-model-name TRITON_MODEL_NAME]
[--triton-model-version TRITON_MODEL_VERSION]
[--triton-server-url TRITON_SERVER_URL]
[--triton-max-batch-size TRITON_MAX_BATCH_SIZE]
[--triton-dyn-batching-delay TRITON_DYN_BATCHING_DELAY]
[--triton-engine-count TRITON_ENGINE_COUNT]
[--save-dir SAVE_DIR]
[--max_workspace_size MAX_WORKSPACE_SIZE] [--trt-fp16]
[--capture-cuda-graph CAPTURE_CUDA_GRAPH]
...
optional arguments:
-h, --help show this help message and exit
--ts-script convert to torchscript using torch.jit.script
--ts-trace convert to torchscript using torch.jit.trace
--onnx convert to onnx using torch.onnx.export
--trt convert to trt using tensorrt
triton related flags:
--triton-no-cuda Use the CPU for tracing.
--triton-model-name TRITON_MODEL_NAME
exports to appropriate directory structure for TRITON
--triton-model-version TRITON_MODEL_VERSION
exports to appropriate directory structure for TRITON
--triton-server-url TRITON_SERVER_URL
exports to appropriate directory structure for TRITON
--triton-max-batch-size TRITON_MAX_BATCH_SIZE
Specifies the 'max_batch_size' in the TRITON model
config. See the TRITON documentation for more info.
--triton-dyn-batching-delay TRITON_DYN_BATCHING_DELAY
Determines the dynamic_batching queue delay in
milliseconds(ms) for the TRITON model config. Use '0'
or '-1' to specify static batching. See the TRITON
documentation for more info.
--triton-engine-count TRITON_ENGINE_COUNT
Specifies the 'instance_group' count value in the
TRITON model config. See the TRITON documentation for
more info.
--save-dir SAVE_DIR Saved model directory
optimization flags:
--max_workspace_size MAX_WORKSPACE_SIZE
set the size of the workspace for trt export
--trt-fp16 trt flag ---- export model in mixed precision mode
--capture-cuda-graph CAPTURE_CUDA_GRAPH
capture cuda graph for obtaining speedup. possible
values: 0, 1. default: 1.
model_arguments arguments that will be ignored by deployer lib and
will be forwarded to your deployer script
```
Following model specific arguments have to be specified for model deployment:
```
--config CONFIG Network architecture to use for deployment (eg. resnet50,
resnext101-32x4d or se-resnext101-32x4d)
--checkpoint CHECKPOINT
Path to stored model weight. If not specified, model will be
randomly initialized
--batch_size BATCH_SIZE
Batch size used for dummy dataloader
--fp16 Use model with half-precision calculations
```
For example, to deploy model into TensorRT format, using half precision and max batch size 64 called
`rnxt-trt-16` execute:
`python -m triton.deployer --trt --trt-fp16 --triton-model-name rnxt-trt-16 --triton-max-batch-size 64 --save-dir /repository -- --config resnext101-32x4d --checkpoint model_checkpoint --batch_size 64 --fp16`
Where `model_checkpoint` is a checkpoint for a trained model with the same architecture (resnext101-32x4d) as used during export.
### Running the Triton Inference Server
**NOTE: This step is executed outside the inference container.**
Pull the Triton Inference Server container from our repository:
`docker pull nvcr.io/nvidia/tritonserver:20.07-py3`
Run the command to start the Triton Inference Server:
`docker run -d --rm --gpus device=0 --ipc=host --network=host -p 8000:8000 -p 8001:8001 -p 8002:8002 -v <PATH_TO_MODEL_REPOSITORY>:/models nvcr.io/nvidia/tritonserver:20.07-py3 trtserver --model-store=/models --log-verbose=1 --model-control-mode=poll --repository-poll-secs=5`
Here `device=0,1,2,3` selects GPUs indexed by ordinals `0,1,2` and `3`, respectively. The server will see only these GPUs. If you write `device=all`, then the server will see all the available GPUs. `PATH_TO_MODEL_REPOSITORY` indicates the location where the
deployed models are stored. The additional `--model-control-mode` option lets the server reload a model when it changes in the filesystem. It is required by the benchmark scripts that work with multiple model versions on a single Triton Inference Server instance.
## Quick Start Guide
### Running the client
The client `client.py` checks the model accuracy against synthetic or real validation
data. The client connects to Triton Inference Server and performs inference.
```
usage: client.py [-h] --triton-server-url TRITON_SERVER_URL
--triton-model-name TRITON_MODEL_NAME [-v]
[--inference_data INFERENCE_DATA] [--batch_size BATCH_SIZE]
[--fp16]
optional arguments:
-h, --help show this help message and exit
--triton-server-url TRITON_SERVER_URL
URL address of Triton server (with port)
--triton-model-name TRITON_MODEL_NAME
Triton deployed model name
-v, --verbose Verbose mode.
--inference_data INFERENCE_DATA
Path to file with inference data.
--batch_size BATCH_SIZE
Inference request batch size
--fp16 Use fp16 precision for input data
```
To run inference on the model exported in the previous steps, using the test data located under
`/data`, run:
`python -m triton.client --triton-server-url localhost:8001 --triton-model-name rnxt-trt-16 --inference_data /data/test_data.bin --batch_size 16 --fp16`
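When no validation data file is available, the client can also be run against synthetic data. The sketch below assumes that omitting `--inference_data` makes the client fall back to synthetic inputs, as described above:
```
python -m triton.client --triton-server-url localhost:8001 --triton-model-name rnxt-trt-16 --batch_size 16 --fp16
```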
### Gathering performance data
Performance data can be gathered using the `perf_client` tool. To use this tool to measure performance for batch_size=32, the following command can be used:
`/workspace/bin/perf_client --max-threads 10 -m rnxt-trt-16 -x 1 -p 10000 -v -i gRPC -u localhost:8001 -b 32 -l 5000 --concurrency-range 1 -f result.csv`
For more information about `perf_client`, refer to the [documentation](https://docs.nvidia.com/deeplearning/sdk/triton-inference-server-master-branch-guide/docs/optimization.html#perf-client).
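To obtain a full latency/throughput curve rather than a single data point, `perf_client` can be run in a loop over several batch sizes. This is a minimal sketch that reuses the flags from the command above and assumes the same server address and model name:
```
# Sweep batch sizes and store one CSV with results per run
for bs in 1 2 4 8 16 32 64; do
  /workspace/bin/perf_client --max-threads 10 -m rnxt-trt-16 -x 1 -p 10000 -i gRPC \
      -u localhost:8001 -b ${bs} -l 5000 --concurrency-range 1 -f result_bs${bs}.csv
done
```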
## Advanced
### Automated benchmark script
To automate benchmarks of different model configurations, a special benchmark script is located in `triton/scripts/benchmark.sh`. To use this script,
run Triton Inference Server and then execute the script as follows:
`bash triton/scripts/benchmark.sh <MODEL_REPOSITORY> <LOG_DIRECTORY> <ARCHITECTURE> (<CHECKPOINT_PATH>)`
The benchmark script tests all supported backends with different batch sizes and server configurations. Logs from the execution will be stored in `<LOG_DIRECTORY>`.
To process static-configuration logs, the `triton/scripts/process_output.sh` script can be used.
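A typical benchmarking session therefore consists of two steps: collecting logs and post-processing them. The invocation of `process_output.sh` below is an assumption about its interface; consult the script itself for the exact arguments it expects:
```
# 1. Collect logs for the resnext101-32x4d model (the Triton Inference Server must already be running)
bash triton/scripts/benchmark.sh /repository /tmp/triton_logs resnext101-32x4d model_checkpoint
# 2. Post-process the static-configuration logs (arguments assumed, see the script)
bash triton/scripts/process_output.sh /tmp/triton_logs
```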
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Dynamic batching performance
The Triton Inference Server has a built-in dynamic batching mechanism that can be enabled. When it is enabled, the server creates inference batches from multiple received requests, which yields better performance than running inference on each request separately. Each request is assumed to contain a single image to be inferenced. With dynamic batching enabled, the server concatenates single-image requests into an inference batch. The upper bound on the size of the inference batch is set to 64. All of these parameters are configurable.
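For reference, dynamic batching is controlled by the `dynamic_batching` section of the model's `config.pbtxt` in the model repository. The snippet below is an illustrative sketch matching the setup described above (an upper bound of 64); the preferred batch sizes and queue delay are example values, not the ones used to generate the results:
```
max_batch_size: 64
dynamic_batching {
  preferred_batch_size: [ 32, 64 ]
  max_queue_delay_microseconds: 100
}
```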
Our results were obtained by running the automated benchmark script.
Throughput is measured in images per second, and latency in milliseconds.
### TensorRT backend inference performance (1x V100 16GB)
**FP32 Inference Performance**
|**Concurrent requests**|**Throughput (img/s)**|**Avg. Latency (ms)**|**90% Latency (ms)**|**95% Latency (ms)**|**99% Latency (ms)**|
|-----|--------|-------|--------|-------|-------|
| 1 | 62.6 | 15.96 | 16.06 | 16.12 | 16.46 |
| 2 | 69.5 | 28.74 | 28.81 | 28.84 | 28.88 |
| 4 | 114.1 | 35.08 | 35.13 | 35.16 | 35.33 |
| 8 | 180 | 44.41 | 44.21 | 49.83 | 50.16 |
| 16 | 240 | 66.66 | 67.02 | 67.10 | 67.26 |
| 32 | 342.2 | 93.75 | 108.43 | 109.48 | 125.68 |
| 64 | 450.9 | 141.60 | 167.91 | 170.35 | 175.99 |
| 128 | 545.5 | 234.40 | 248.57 | 250.87 | 254.69 |
| 256 | 652.8 | 395.46 | 397.43 | 399.69 | 403.24 |
**FP16 Inference Performance**
|**Concurrent requests**|**Throughput (img/s)**|**Avg. Latency (ms)**|**90% Latency (ms)**|**95% Latency (ms)**|**99% Latency (ms)**|
|-----|--------|-------|--------|-------|-------|
| 1 | 85.7 | 11.68 | 11.76 | 11.79 | 11.85 |
| 2 | 92 | 21.74 | 21.83 | 21.86 | 21.91 |
| 4 | 141.7 | 28.22 | 35.01 | 35.38 | 35.51 |
| 8 | 235.4 | 33.98 | 38.05 | 38.67 | 38.85 |
| 16 | 393 | 40.67 | 42.90 | 43.28 | 43.50 |
| 32 | 624.8 | 51.18 | 51.71 | 51.82 | 52.08 |
| 64 | 874.6 | 73.39 | 74.39 | 74.60 | 75.12 |
| 128 | 1126.4 | 113.73 | 114.16 | 114.54 | 115.99 |
| 256 | 1312 | 195.87 | 196.87 | 197.75 | 199.06 |
![Latency vs Throughput](./Latency-vs-Throughput-TensorRT.png)
![Performance analysis - TensorRT FP32](./Performance-analysis-TensorRT-FP32.png)
![Performance analysis - TensorRT FP16](./Performance-analysis-TensorRT-FP16.png)
## Release notes
### Changelog
September 2020
- Initial release |
PyTorch/Detection/SSD/examples | examples | SSD300_A100_FP16_4GPU | # This script launches SSD300 training in FP16 on 4 GPUs using 1024 batch size (256 per GPU)
# Usage ./SSD300_A100_FP16_4GPU.sh <path to this repository> <path to dataset> <additional flags>
torchrun --nproc_per_node=4 $1/main.py --backbone resnet50 --learning-rate 2.7e-3 --warmup 1200 --bs 256 --data $2 ${@:3}
|