Dataset schema:
- relative_path: string, 812 distinct values
- section: string, 339 distinct values
- filename: string, 2-61 characters
- text: string, 6 characters to 1.76M characters
PyTorch/SpeechSynthesis/FastPitch/common/text
text
cmudict
""" from https://github.com/keithito/tacotron """ import re import sys import urllib.request from pathlib import Path valid_symbols = [ 'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2', 'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2', 'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY', 'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1', 'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0', 'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW', 'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH' ] _valid_symbol_set = set(valid_symbols) class CMUDict: '''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict''' def __init__(self, file_or_path=None, heteronyms_path=None, keep_ambiguous=True): self._entries = {} self.heteronyms = [] if file_or_path is not None: self.initialize(file_or_path, heteronyms_path, keep_ambiguous) def initialize(self, file_or_path, heteronyms_path, keep_ambiguous=True): if isinstance(file_or_path, str): if not Path(file_or_path).exists(): print("CMUdict missing. Downloading to data/cmudict/.") self.download() with open(file_or_path, encoding='latin-1') as f: entries = _parse_cmudict(f) else: entries = _parse_cmudict(file_or_path) if not keep_ambiguous: entries = {word: pron for word, pron in entries.items() if len(pron) == 1} self._entries = entries if heteronyms_path is not None: with open(heteronyms_path, encoding='utf-8') as f: self.heteronyms = [l.rstrip() for l in f] def __len__(self): if len(self._entries) == 0: raise ValueError("CMUDict not initialized") return len(self._entries) def lookup(self, word): '''Returns list of ARPAbet pronunciations of the given word.''' if len(self._entries) == 0: raise ValueError("CMUDict not initialized") return self._entries.get(word.upper()) def download(self): url = 'https://github.com/Alexir/CMUdict/raw/master/cmudict-0.7b' try: Path('cmudict').mkdir(parents=False, exist_ok=True) urllib.request.urlretrieve(url, filename='cmudict/cmudict-0.7b') except: print("Automatic download of CMUdict failed. Try manually with:") print() print(" bash scripts/download_cmudict.sh") print() print("and re-run the script.") sys.exit(0) _alt_re = re.compile(r'\([0-9]+\)') def _parse_cmudict(file): cmudict = {} for line in file: if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"): parts = line.split(' ') word = re.sub(_alt_re, '', parts[0]) pronunciation = _get_pronunciation(parts[1]) if pronunciation: if word in cmudict: cmudict[word].append(pronunciation) else: cmudict[word] = [pronunciation] return cmudict def _get_pronunciation(s): parts = s.strip().split(' ') for part in parts: if part not in _valid_symbol_set: return None return ' '.join(parts)
TensorFlow/Translation/GNMT/variable_mgr
variable_mgr
variable_mgr
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Defines VariableMgr and subclasses used to manage variables. """ from __future__ import print_function import re import tensorflow as tf from utils import misc_utils from variable_mgr import allreduce from variable_mgr import batch_allreduce from variable_mgr import variable_mgr_util class VariableMgr(object): """Abstract superclass for class used by BenchmarkCNN to control variables. Functions on this class are used to control how variables are created and managed, and how gradients are computed and applied. """ def __init__(self, benchmark_cnn): self.benchmark_cnn = benchmark_cnn self.staging_delta_ops = [] self.use_resource_vars = benchmark_cnn.params.use_resource_vars # A variable for automatic loss scaling. self.grad_has_inf_nan = None def each_tower_has_variables(self): """Returns True if each GPU tower of the model has separate variables.""" assert False, 'Must be implemented in subclass' def supports_staged_vars(self): """Whether staged variable management is supported.""" return False def create_outer_variable_scope(self, device_num): """Create the tf.variable_scope around all model graph operations.""" del device_num # unused by this implementation assert False, 'Must be implemented in subclass' def preprocess_device_grads(self, device_grads): """Preprocess the device gradients prior to applying them. Args: device_grads: List of lists of (gradient, variable) tuples. device_grads[t][g] = (gradient, variable), where t is the index of the tower and g is the index of the gradient-variable pair. Returns: a tuple of (apply_gradients_devices, gradient_state). gradient_state is an opaque structure that should be passed to get_gradients_to_apply() and append_apply_gradients_ops() (in that order). apply_gradients_devices is a list of devices where the gradients will be applied with get_gradients_to_apply() and append_apply_gradients_ops(). """ del device_grads # unused by this implementation assert False, 'Must be implemented in subclass' def get_gradients_to_apply(self, device_num, gradient_state): """Returns the [(gradient, variable)] list to apply for device_num. Args: device_num: indexes into apply_gradients_devices, which was returned by an earlier call to preprocess_device_grads. gradient_state: from previous call to apply_gradients_devices. """ del device_num, gradient_state # unused by this implementation assert False, 'Must be implemented in subclass' def append_apply_gradients_ops(self, gradient_state, opt, grads, training_ops, loss_scale_params): """Adds training ops for grads to 'training_ops'. Args: gradient_state: from previous call to apply_gradients_devices. opt: the underlying optimizer grads: [(grad, var)] to apply training_ops: list to which to add ops loss_scale_params: parameters for loss scaling. 
""" del gradient_state # unused by this implementation def get_apply_gradients_ops_func(): """Returns the apply_gradients op.""" return [opt.apply_gradients(grads)] variable_mgr_util.append_gradients_with_loss_scale( training_ops, get_apply_gradients_ops_func, loss_scale_params, self.grad_has_inf_nan) def get_post_init_ops(self): """Returns ops that should run post-initialization.""" return [] def get_devices(self): """Returns devices to use for computation; includes replica selection.""" assert False, 'Must be implemented in subclass' def savable_variables(self): """Returns a list/dict of savable variables to pass to tf.train.Saver.""" return tf.global_variables() def trainable_variables_on_device(self, rel_device_num, abs_device_num, writable=False): """Return the set of trainable variables on device. Args: rel_device_num: local worker device index. abs_device_num: global graph device index. writable: whether to get a reference to the underlying variable. Returns: The set of trainable variables on the specified device. """ del rel_device_num, writable if self.each_tower_has_variables(): params = [ v for v in tf.trainable_variables() if v.name.startswith('v%s/' % abs_device_num) ] else: params = tf.trainable_variables() return params class VariableMgrLocalReplicated(VariableMgr): """VariableMgr that implements the --replicated mode for local jobs. Each GPU has its own copy of the variables. To apply gradients, either a local all-reduce algorithm is applied or a regular cross-device aggregation is used to replicate the combined gradients to all towers. """ def __init__(self, benchmark_cnn, all_reduce_spec, agg_small_grads_max_bytes, agg_small_grads_max_group, allreduce_merge_scope): super(VariableMgrLocalReplicated, self).__init__(benchmark_cnn) if all_reduce_spec: spec = allreduce.parse_all_reduce_spec(all_reduce_spec) if len(spec) != 1: raise ValueError( 'replicated mode does not support hybrid all-reduce strategies') self._all_reduce_spec = spec[0] else: self._all_reduce_spec = None self._agg_small_grads_max_bytes = agg_small_grads_max_bytes self._agg_small_grads_max_group = agg_small_grads_max_group self._warmup_ops = [] self._allreduce_merge_scope = allreduce_merge_scope self._gradient_put_ops = None def each_tower_has_variables(self): return True def create_outer_variable_scope(self, device_num): return tf.variable_scope('v%s' % device_num, use_resource=self.use_resource_vars) def preprocess_device_grads(self, device_grads): compact_grads = (self.benchmark_cnn.params.use_fp16 and self.benchmark_cnn.params.compact_gradient_transfer) defer_grads = (self.benchmark_cnn.params.variable_consistency == 'relaxed') grads_to_reduce = [[g for g, _ in grad_vars] for grad_vars in device_grads] algorithm = batch_allreduce.algorithm_from_params(self.benchmark_cnn.params) reduced_grads, self._warmup_ops = algorithm.batch_all_reduce( grads_to_reduce, self.benchmark_cnn.params.gradient_repacking, compact_grads, defer_grads) assert not self._warmup_ops if (self.benchmark_cnn.params.use_fp16 and self.benchmark_cnn.enable_auto_loss_scale): # Check for infs or nans is_finite_list = [] with tf.name_scope('check_for_inf_and_nan'): for tower_grads in reduced_grads: with tf.colocate_with(tower_grads[0]): # TODO(tanmingxing): Create fused op that takes in a list of tensors # as input and returns scalar boolean True if there are any # infs/nans. 
is_finite_list.append(tf.reduce_all( [tf.reduce_all(tf.is_finite(g)) for g in tower_grads])) self.grad_has_inf_nan = tf.logical_not(tf.reduce_all(is_finite_list)) reduced_device_grads = [[ (g, v) for g, (_, v) in zip(grads, grad_vars) ] for grads, grad_vars in zip(reduced_grads, device_grads)] return self.benchmark_cnn.devices, reduced_device_grads def get_gradients_to_apply(self, device_num, gradient_state): device_grads = gradient_state return device_grads[device_num] def get_post_init_ops(self): # Copy initialized values for variables on GPU 0 to other GPUs. global_vars = tf.global_variables() var_by_name = dict([(v.name, v) for v in global_vars]) post_init_ops = [] copy_froms = set() skipped_vars = [] for v in global_vars: split_name = v.name.split('/') # TODO(b/62630508): use more specific prefix than v or v0. if split_name[0] == 'v0' or not v.name.startswith('v'): skipped_vars.append(v) continue # Only vars starts with "v[number]" are synced. split_name[0] = 'v0' copy_from = var_by_name['/'.join(split_name)] copy_froms.add(copy_from) post_init_ops.append(v.assign(copy_from.read_value())) post_init_ops += self._warmup_ops # If copy-froms is empty, then all vars are actually saved. misc_utils.print_out('All copy-from vars(%d): ' % len(copy_froms)) for gv in copy_froms: misc_utils.print_out(gv.name) misc_utils.print_out('All skippped vars(%d): ' % len(skipped_vars)) for gv in skipped_vars: misc_utils.print_out(gv.name) assert len(skipped_vars) >= len(copy_froms) return post_init_ops def savable_variables(self): """Return the set of variables used for saving/loading the model.""" params = [] for v in tf.global_variables(): split_name = v.name.split('/') if split_name[0] == 'v0' or not v.name.startswith('v'): params.append(v) return params def get_devices(self): return self.benchmark_cnn.raw_devices
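The replicated-variable broadcast in get_post_init_ops above is driven purely by the `v<device_index>/` name prefix created by create_outer_variable_scope. A framework-free sketch of that name mapping (standalone illustration, not part of the file):

def source_name(var_name):
    """Return the 'v0/...' counterpart a replicated variable is copied from, or None."""
    parts = var_name.split('/')
    if parts[0] == 'v0' or not var_name.startswith('v'):
        return None                    # master copy or unreplicated variable: nothing to sync
    parts[0] = 'v0'
    return '/'.join(parts)

print(source_name('v3/conv1/weights:0'))   # -> 'v0/conv1/weights:0'
print(source_name('v0/conv1/weights:0'))   # -> None (already the master copy)
print(source_name('global_step:0'))        # -> None (not tower-scoped; also kept by savable_variables)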
PyTorch/SpeechSynthesis/Tacotron2/exports
exports
export_tacotron2_ts
# ***************************************************************************** # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # ***************************************************************************** import torch import argparse import sys sys.path.append('./') from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model def parse_args(parser): """ Parse commandline arguments. """ parser.add_argument('--tacotron2', type=str, required=True, help='full path to the Tacotron2 model checkpoint file') parser.add_argument('-o', '--output', type=str, default="trtis_repo/tacotron/1/model.pt", help='filename for the Tacotron 2 TorchScript model') parser.add_argument('--fp16', action='store_true', help='inference with mixed precision') return parser def main(): parser = argparse.ArgumentParser( description='PyTorch Tacotron 2 Inference') parser = parse_args(parser) args = parser.parse_args() tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2, amp_run=args.fp16, cpu_run=False, forward_is_infer=True) jitted_tacotron2 = torch.jit.script(tacotron2) torch.jit.save(jitted_tacotron2, args.output) if __name__ == '__main__': main()
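A short sketch of loading the exported TorchScript file back for inference; the output path, the vocabulary size of the dummy input, and the (sequences, lengths) calling convention mirror the Tacotron 2 inference code and are assumptions here, not guaranteed by this file:

import torch

tacotron2 = torch.jit.load('trtis_repo/tacotron/1/model.pt').cuda().eval()
# Dummy batch of encoded text symbols; the vocabulary size (148) is an assumption.
sequences = torch.randint(low=0, high=148, size=(1, 50), dtype=torch.long).cuda()
lengths = torch.tensor([50], dtype=torch.long).cuda()
with torch.no_grad():
    # forward_is_infer=True at export time, so calling the module runs inference
    outputs = tacotron2(sequences, lengths)   # mel spectrograms, lengths, alignments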
PyTorch/LanguageModeling/BART/utils
utils
activations
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import logging import math import torch import torch.nn.functional as F logger = logging.getLogger(__name__) def swish(x): return x * torch.sigmoid(x) def _gelu_python(x): """ Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in torch.nn.functional Also see https://arxiv.org/abs/1606.08415 """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def gelu_new(x): """ Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT). Also see https://arxiv.org/abs/1606.08415 """ return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0)))) if torch.__version__ < "1.4.0": gelu = _gelu_python else: gelu = F.gelu def gelu_fast(x): return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x))) ACT2FN = { "relu": F.relu, "swish": swish, "gelu": gelu, "tanh": torch.tanh, "gelu_new": gelu_new, "gelu_fast": gelu_fast, } def get_activation(activation_string): if activation_string in ACT2FN: return ACT2FN[activation_string] else: raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
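A small usage sketch for the activation registry above; the import path follows this repo's utils package layout and is an assumption:

import torch
from utils.activations import ACT2FN, get_activation   # import path is an assumption

act = get_activation('gelu_new')
x = torch.randn(2, 8)
y = act(x)                        # element-wise activation, same shape as x
print(sorted(ACT2FN.keys()))      # ['gelu', 'gelu_fast', 'gelu_new', 'relu', 'swish', 'tanh']
# get_activation('mish')          # would raise KeyError listing the supported names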
PyTorch/Classification/ConvNets/efficientnet/inference/FP32
FP32
DGXA100_efficientnet-b4_FP32
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 1 --workspace ${1:-./} --raport-file raport_1.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 2 --workspace ${1:-./} --raport-file raport_2.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 4 --workspace ${1:-./} --raport-file raport_4.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 8 --workspace ${1:-./} --raport-file raport_8.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 16 --workspace ${1:-./} --raport-file raport_16.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 32 --workspace ${1:-./} --raport-file raport_32.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 64 --workspace ${1:-./} --raport-file raport_64.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision FP32 --mode benchmark_inference --platform DGXA100 /imagenet -b 128 --workspace ${1:-./} --raport-file raport_128.json
MxNet/Classification/RN50v1.5/scripts
scripts
prepare_imagenet
#!/bin/bash # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if [ $# -lt 2 ] ; then echo "usage: $0 raw_dataset prepared_dataset" exit 1 fi cd "$2" && python /opt/mxnet/tools/im2rec.py --list --recursive train "$1/train" && python /opt/mxnet/tools/im2rec.py --list --recursive val "$1/val" && python /opt/mxnet/tools/im2rec.py --pass-through --num-thread 40 train "$1/train" && python /opt/mxnet/tools/im2rec.py --pass-through --num-thread 40 val "$1/val" && echo "Dataset was prepared successfully!"
PyTorch/Recommendation/DLRM/preproc
preproc
run_spark_gpu_DGX-A100
#!/bin/bash # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ######################################################################### # File Name: run_spark_gpu_DGX-A100.sh set -e # the data path including 1TB criteo data, day_0, day_1, ... export INPUT_PATH=${1:-'/data/dlrm/criteo'} # the output path, use for generating the dictionary and the final dataset # the output folder should have more than 300GB export OUTPUT_PATH=${2:-'/data/dlrm/output'} export FREQUENCY_LIMIT=${3:-'15'} HARDWARE_PLATFORM='DGX-A100' # spark local dir should have about 3TB # the temporary path used for spark shuffle write export SPARK_LOCAL_DIRS='/data/dlrm/spark/tmp' source DGX-A100_config.sh OPTS="--frequency_limit $FREQUENCY_LIMIT" export SPARK_HOME=/opt/spark export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 export PATH=$SPARK_HOME/bin:$SPARK_HOME/sbin:$PATH # we use spark standalone to run the job export MASTER=spark://$HOSTNAME:7077 echo "Starting spark standalone" start-master.sh start-slave.sh $MASTER echo "Generating the dictionary..." spark-submit --master $MASTER \ --driver-memory "${DRIVER_MEMORY}G" \ --executor-cores $NUM_EXECUTOR_CORES \ --executor-memory "${EXECUTOR_MEMORY}G" \ --conf spark.cores.max=$TOTAL_CORES \ --conf spark.task.cpus=1 \ --conf spark.sql.files.maxPartitionBytes=1073741824 \ --conf spark.sql.shuffle.partitions=600 \ --conf spark.driver.maxResultSize=2G \ --conf spark.locality.wait=0s \ --conf spark.network.timeout=1800s \ --conf spark.task.resource.gpu.amount=0.01 \ --conf spark.executor.resource.gpu.amount=1 \ --conf spark.plugins=com.nvidia.spark.SQLPlugin \ --conf spark.rapids.sql.concurrentGpuTasks=2 \ --conf spark.rapids.sql.reader.batchSizeRows=4000000 \ --conf spark.rapids.memory.pinnedPool.size=16g \ --conf spark.rapids.sql.explain=ALL \ --conf spark.sql.autoBroadcastJoinThreshold=1GB \ --conf spark.rapids.sql.incompatibleOps.enabled=true \ --conf spark.driver.maxResultSize=2G \ --conf spark.executor.extraJavaOptions="-Dcom.nvidia.cudf.prefer-pinned=true\ -Djava.io.tmpdir=$SPARK_LOCAL_DIRS" \ spark_data_utils.py --mode generate_models \ $OPTS \ --input_folder $INPUT_PATH \ --days 0-23 \ --model_folder $OUTPUT_PATH/models \ --write_mode overwrite --low_mem 2>&1 | tee submit_dict_log.txt echo "Transforming the train data from day_0 to day_22..." 
spark-submit --master $MASTER \ --driver-memory "${DRIVER_MEMORY}G" \ --executor-cores $NUM_EXECUTOR_CORES \ --executor-memory "${EXECUTOR_MEMORY}G" \ --conf spark.cores.max=$TOTAL_CORES \ --conf spark.task.cpus=16 \ --conf spark.sql.files.maxPartitionBytes=1073741824 \ --conf spark.sql.shuffle.partitions=600 \ --conf spark.driver.maxResultSize=2G \ --conf spark.locality.wait=0s \ --conf spark.network.timeout=1800s \ --conf spark.task.resource.gpu.amount=0.5 \ --conf spark.executor.resource.gpu.amount=1 \ --conf spark.plugins=com.nvidia.spark.SQLPlugin \ --conf spark.rapids.sql.concurrentGpuTasks=2 \ --conf spark.rapids.sql.reader.batchSizeRows=4000000 \ --conf spark.rapids.memory.pinnedPool.size=16g \ --conf spark.rapids.sql.explain=ALL \ --conf spark.sql.autoBroadcastJoinThreshold=1GB \ --conf spark.rapids.sql.incompatibleOps.enabled=true \ --conf spark.driver.maxResultSize=2G \ --conf spark.executor.extraJavaOptions="-Dcom.nvidia.cudf.prefer-pinned=true\ -Djava.io.tmpdir=$SPARK_LOCAL_DIRS" \ spark_data_utils.py --mode transform \ --input_folder $INPUT_PATH \ --days 0-22 \ --output_folder $OUTPUT_PATH/train \ --model_size_file $OUTPUT_PATH/model_size.json \ --model_folder $OUTPUT_PATH/models \ --write_mode overwrite --low_mem 2>&1 | tee submit_train_log.txt echo "Splitting the last day into 2 parts of test and validation..." last_day=$INPUT_PATH/day_23 temp_test=$OUTPUT_PATH/temp/test temp_validation=$OUTPUT_PATH/temp/validation mkdir -p $temp_test $temp_validation lines=`wc -l $last_day | awk '{print $1}'` former=$((lines / 2)) latter=$((lines - former)) head -n $former $last_day > $temp_test/day_23 tail -n $latter $last_day > $temp_validation/day_23 echo "Transforming the test data in day_23..." spark-submit --master $MASTER \ --driver-memory "${DRIVER_MEMORY}G" \ --executor-cores $NUM_EXECUTOR_CORES \ --executor-memory "${EXECUTOR_MEMORY}G" \ --conf spark.cores.max=$TOTAL_CORES \ --conf spark.task.cpus=32 \ --conf spark.sql.files.maxPartitionBytes=1073741824 \ --conf spark.sql.shuffle.partitions=600 \ --conf spark.driver.maxResultSize=2G \ --conf spark.locality.wait=0s \ --conf spark.network.timeout=1800s \ --conf spark.task.resource.gpu.amount=1 \ --conf spark.executor.resource.gpu.amount=1 \ --conf spark.plugins=com.nvidia.spark.SQLPlugin \ --conf spark.rapids.sql.concurrentGpuTasks=1 \ --conf spark.rapids.sql.reader.batchSizeRows=4000000 \ --conf spark.rapids.memory.pinnedPool.size=16g \ --conf spark.rapids.sql.explain=ALL \ --conf spark.sql.autoBroadcastJoinThreshold=1GB \ --conf spark.rapids.sql.incompatibleOps.enabled=true \ --conf spark.driver.maxResultSize=2G \ --conf spark.executor.extraJavaOptions="-Dcom.nvidia.cudf.prefer-pinned=true\ -Djava.io.tmpdir=$SPARK_LOCAL_DIRS" \ spark_data_utils.py --mode transform \ --input_folder $temp_test \ --days 23-23 \ --output_folder $OUTPUT_PATH/test \ --output_ordering input \ --model_folder $OUTPUT_PATH/models \ --write_mode overwrite --low_mem 2>&1 | tee submit_test_log.txt echo "Transforming the validation data in day_23..." 
spark-submit --master $MASTER \ --driver-memory "${DRIVER_MEMORY}G" \ --executor-cores $NUM_EXECUTOR_CORES \ --executor-memory "${EXECUTOR_MEMORY}G" \ --conf spark.cores.max=$TOTAL_CORES \ --conf spark.task.cpus=32 \ --conf spark.sql.files.maxPartitionBytes=1073741824 \ --conf spark.sql.shuffle.partitions=600 \ --conf spark.driver.maxResultSize=2G \ --conf spark.locality.wait=0s \ --conf spark.network.timeout=1800s \ --conf spark.task.resource.gpu.amount=1 \ --conf spark.executor.resource.gpu.amount=1 \ --conf spark.plugins=com.nvidia.spark.SQLPlugin \ --conf spark.rapids.sql.concurrentGpuTasks=1 \ --conf spark.rapids.sql.reader.batchSizeRows=4000000 \ --conf spark.rapids.memory.pinnedPool.size=16g \ --conf spark.rapids.sql.explain=ALL \ --conf spark.sql.autoBroadcastJoinThreshold=1GB \ --conf spark.rapids.sql.incompatibleOps.enabled=true \ --conf spark.driver.maxResultSize=2G \ --conf spark.executor.extraJavaOptions="-Dcom.nvidia.cudf.prefer-pinned=true\ -Djava.io.tmpdir=$SPARK_LOCAL_DIRS" \ spark_data_utils.py --mode transform \ --input_folder $temp_validation \ --days 23-23 \ --output_folder $OUTPUT_PATH/validation \ --output_ordering input \ --model_folder $OUTPUT_PATH/models \ --write_mode overwrite --low_mem 2>&1 | tee submit_validation_log.txt rm -r $temp_test $temp_validation stop-master.sh stop-slave.sh
TensorFlow/Detection/SSD/models/research/object_detection/models
models
faster_rcnn_resnet_v1_feature_extractor_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.models.faster_rcnn_resnet_v1_feature_extractor.""" import numpy as np import tensorflow as tf from object_detection.models import faster_rcnn_resnet_v1_feature_extractor as faster_rcnn_resnet_v1 class FasterRcnnResnetV1FeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, first_stage_features_stride, architecture='resnet_v1_101'): feature_extractor_map = { 'resnet_v1_50': faster_rcnn_resnet_v1.FasterRCNNResnet50FeatureExtractor, 'resnet_v1_101': faster_rcnn_resnet_v1.FasterRCNNResnet101FeatureExtractor, 'resnet_v1_152': faster_rcnn_resnet_v1.FasterRCNNResnet152FeatureExtractor } return feature_extractor_map[architecture]( is_training=False, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): for architecture in ['resnet_v1_50', 'resnet_v1_101', 'resnet_v1_152']: feature_extractor = self._build_feature_extractor( first_stage_features_stride=16, architecture=architecture) preprocessed_inputs = tf.random_uniform( [4, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [4, 14, 14, 1024]) def test_extract_proposal_features_stride_eight(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=8) preprocessed_inputs = tf.random_uniform( [4, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [4, 28, 28, 1024]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 112, 112, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 7, 7, 1024]) def test_extract_proposal_features_dies_on_invalid_stride(self): with self.assertRaises(ValueError): self._build_feature_extractor(first_stage_features_stride=99) def 
test_extract_proposal_features_dies_on_very_small_images(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3)) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) with self.assertRaises(tf.errors.InvalidArgumentError): sess.run( features_shape, feed_dict={preprocessed_inputs: np.random.rand(4, 32, 32, 3)}) def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [224, 224, 3], maxval=255, dtype=tf.float32) with self.assertRaises(ValueError): feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') def test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) proposal_feature_maps = tf.random_uniform( [3, 7, 7, 1024], maxval=255, dtype=tf.float32) proposal_classifier_features = ( feature_extractor.extract_box_classifier_features( proposal_feature_maps, scope='TestScope')) features_shape = tf.shape(proposal_classifier_features) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [3, 7, 7, 2048]) if __name__ == '__main__': tf.test.main()
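The expected spatial sizes asserted in the tests above follow directly from the first-stage feature stride (output side = input side / stride for these input sizes); a one-line check of that arithmetic:

# The RPN feature-map sizes asserted above are just input_size / stride.
for input_size, stride in [(224, 16), (224, 8), (112, 16)]:
    print(input_size, stride, input_size // stride)   # 14, 28, 7 -- matching the test expectations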
TensorFlow/Segmentation/UNet_Industrial/model/layers
layers
utils
#!/usr/bin/env python # -*- coding: utf-8 -*- # ============================================================================== # # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================== import horovod.tensorflow as hvd from utils import hvd_utils __all__ = ["_log_hparams"] def _log_hparams(classname, layername, **kwargs): log_msg = "%s: `%s`" % (classname, layername) for arg, val in sorted(kwargs.items()): log_msg += "\n\t[*] {}: {}".format(arg, val) log_msg += "\n" if not hvd_utils.is_using_hvd() or hvd.rank() == 0: print(log_msg)
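A usage sketch for _log_hparams above; the layer name and hyperparameter values are made up, and the import path is an assumption:

from model.layers.utils import _log_hparams   # import path is an assumption

_log_hparams(
    classname='Conv2D',
    layername='encoder/conv_1',
    kernel_size=(3, 3),
    filters=64,
    use_bias=True,
)
# Prints (on Horovod rank 0 only) something like:
# Conv2D: `encoder/conv_1`
#     [*] filters: 64
#     [*] kernel_size: (3, 3)
#     [*] use_bias: True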
TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading
dataloading
gen_csv
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser import pandas as pd import os import numpy as np from .defaults import NUMERICAL_CHANNEL, LABEL_CHANNEL from .feature_spec import FeatureSpec def parse_args(): parser = ArgumentParser() parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml', help='Name of the input feature specification file') parser.add_argument('--output', type=str, default='/data') parser.add_argument('--size', type=int, default=1000) return parser.parse_args() def main(): args = parse_args() dataset_size = args.size fspec_in = FeatureSpec.from_yaml(args.feature_spec_in) fspec_in.base_directory = args.output cat_cardinalities = fspec_in.get_categorical_sizes() cat_names = fspec_in.get_categorical_feature_names() cardinalities = {name: cardinality for name, cardinality in zip(cat_names, cat_cardinalities)} input_label_feature_name = fspec_in.channel_spec[LABEL_CHANNEL][0] numerical_names_set = set(fspec_in.channel_spec[NUMERICAL_CHANNEL]) for mapping_name, mapping in fspec_in.source_spec.items(): for chunk in mapping: assert chunk['type'] == 'csv', "Only csv files supported in this generator" assert len(chunk['files']) == 1, "Only one file per chunk supported in this transcoder" path_to_save = os.path.join(fspec_in.base_directory, chunk['files'][0]) data = [] for name in chunk['features']: if name == input_label_feature_name: data.append(np.random.randint(0, 1, size=dataset_size)) elif name in numerical_names_set: data.append(np.random.rand(dataset_size)) else: local_cardinality = cardinalities[name] data.append(np.random.randint(0, local_cardinality, size=dataset_size)) values = np.stack(data).T to_save = pd.DataFrame(values, columns=chunk['features']) os.makedirs(os.path.dirname(path_to_save), exist_ok=True) to_save.to_csv(path_to_save, index=False, header=False) if __name__ == "__main__": main()
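A sketch of reading back one CSV chunk written by the generator above; the feature-spec path and the mapping name are placeholders that depend on the actual spec file:

import os
import pandas as pd
from dataloading.feature_spec import FeatureSpec   # same package as the generator (assumption)

fspec = FeatureSpec.from_yaml('/data/feature_spec.yaml')
chunk = fspec.source_spec['train'][0]               # 'train' mapping name is a placeholder
frame = pd.read_csv(os.path.join('/data', chunk['files'][0]),
                    header=None, names=chunk['features'])   # files are written without a header row
print(frame.dtypes)   # label/categorical columns as integers, numerical features as floats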
TensorFlow2/Classification/ConvNets/model/layers
layers
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from model.layers.activations import simple_swish, hard_swish, identity, gelu, get_activation from model.layers.normalization import get_batch_norm __all__ = ['simple_swish', 'hard_swish', 'identity', 'gelu', 'get_activation', 'get_batch_norm']
PyTorch/Recommendation/DLRM/dlrm/cuda_src/dot_based_interact
dot_based_interact
dot_based_interact_fp16_fwd
#include <cuda.h> #include <cuda_fp16.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <mma.h> #include <cuda_fp16.hpp> #include <fstream> #include <iomanip> #include <iostream> #include <vector> #include <ATen/cuda/CUDAContext.h> #include <torch/extension.h> #include "shared_utils.cuh" using namespace nvcuda; template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint M_BLOCKS, uint K_BLOCKS, uint SMEM_STRIDE, uint SMEM_STRIDE_ACC, uint WARP_SIZE, uint WARP_SIZE_LOG_2, uint TILE_DIM, uint TILE_DIM_LOG_2> __launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractFwdKernelNonAligned(const __half *__restrict input, __half *__restrict output, uint batch_size, uint num_rows, uint num_cols, uint num_rows_after_padding, uint num_cols_after_padding, uint smem_elems_per_warp, uint smem_rows_per_warp, uint output_size, uint num_row_steps, uint num_col_steps, uint padding_size) { uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2); //each threadblock covers multiple (4) samples //num_rows is num of categoricals + 1, num_cols is embedding/bottom_mlp size int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id; //each warp covers a sample if (sample_id >= batch_size) { return; } int lane_id = threadIdx.x & (WARP_SIZE - 1); //0...32, within a sample extern __shared__ half shmem_dynamic[]; half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp); //skip to the input for our warp const half *sample_input = input + num_rows * num_cols * sample_id; //copy all rows of our input (all embeddings and bottom_mlp) for (uint i = 0; i < num_rows; ++i, sample_input += num_cols) { //each thread is assigned pieces to copy based on lane_id for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) { (shmem + i * SMEM_STRIDE)[idx] = sample_input[idx]; } } uint idx = lane_id + num_cols; //pad each embedding to num_cols_after_padding //this assumes that num_cols_after_padding-num_cols<= WARP_SIZE if (idx < num_cols_after_padding) { for (int i = 0; i < num_rows; ++i) { (shmem + i * SMEM_STRIDE)[idx] = __float2half(0); } } //add more fake embeddings filled with zeros so we can better use cores //zero out 4 cells at once, hence the >>2 half4 zeros; zeros.vals[0].x = __float2half(0); zeros.vals[0].y = __float2half(0); zeros.vals[1].x = __float2half(0); zeros.vals[1].y = __float2half(0); if (lane_id < (num_cols_after_padding >> 2)) { for (int i = num_rows; i < num_rows_after_padding; i++) { ((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] = zeros; } } __syncwarp(); half *gmem_output = output + output_size * sample_id; //copy over the bottom_mlp_output into the final result //assumes bottom_mlp_output is at the start of the input for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) { gmem_output[idx] = shmem[idx]; } //compute the dot product wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS]; for (int i = 0; i < M_BLOCKS; i++) { for (int j = 0; j < M_BLOCKS; j++) { wmma::fill_fragment(acc[i][j], 0); } } for (int k_step = 0; k_step < num_col_steps; k_step++) { wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS]; wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS]; for (int j = 0; j < M_BLOCKS; j++) { int base_row = (j < M_BLOCKS - 1) ? 
j * 16 : smem_rows_per_warp - 16; const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16); wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE); wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE); } for (int i = 0; i < M_BLOCKS; i++) { for (int j = 0; j < M_BLOCKS; j++) { wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]); } } } float *shmem_store = reinterpret_cast<float *>(shmem); for (int i = 0; i < M_BLOCKS; i++) { for (int j = 0; j < M_BLOCKS; j++) { float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16); wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major); } } // skip over the part where we copied the bottom_mlp_output half *gmem_interact_output = gmem_output + num_cols; // copy over the dot product result into the output int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp; int srcLine = 0; for (int i = 0; i < num_rows; ++i, ++srcLine) { if (i == ((M_BLOCKS - 1) * 16)) { srcLine += lastRowBlockOffset; } if (lane_id < i) { //this assumes we have num_categorical_features<WARP_SIZE uint offset = (i * (i - 1)) >> 1; gmem_interact_output[offset + lane_id] = __float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]); } } // Add padding to the output vectors if (lane_id < padding_size) { gmem_output[output_size - lane_id - 1] = __float2half(0); } } template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint M_BLOCKS, uint K_BLOCKS, uint SMEM_STRIDE, uint SMEM_STRIDE_ACC, uint WARP_SIZE, uint WARP_SIZE_LOG_2, uint TILE_DIM, uint TILE_DIM_LOG_2> __launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractFwdKernel(const __half *__restrict input, __half *__restrict output, uint batch_size, uint num_rows, uint num_cols, uint num_rows_after_padding, uint num_cols_after_padding, uint smem_elems_per_warp, uint smem_rows_per_warp, uint output_size, uint num_row_steps, uint num_col_steps, uint padding_size) { uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2); //each threadblock covers multiple (4) samples //num_rows is num of categoricals + 1, num_cols is embedding/bottom_mlp size int sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id; //each warp covers a sample if (sample_id >= batch_size) { return; } int lane_id = threadIdx.x & (WARP_SIZE - 1); //0...32, within a sample extern __shared__ half shmem_dynamic[]; half *shmem = shmem_dynamic + (warp_id * smem_elems_per_warp); //piece of threadblocks memory corresponding to our sample const half *sample_input = input + num_rows * num_cols * sample_id; //jump to our sample //loop over embeddings, and copy each into shmem (but assume size is <=128>) if (lane_id < (num_cols >> 2)) {//divided by 4 because we copy four at once for (int i = 0; i < num_rows; ++i, sample_input += num_cols) { ((float2 *)(shmem + i * SMEM_STRIDE))[lane_id] = ((float2 *)sample_input)[lane_id]; } } //pad each embedding to num_cols_after_padding //this assumes that num_cols_after_padding-num_cols<= WARP_SIZE uint idx = lane_id + num_cols; if (idx < num_cols_after_padding) {// the padding is to compute in tiles for (int i = 0; i < num_rows; ++i) { (shmem + i * SMEM_STRIDE)[idx] = __float2half(0); } } //add more fake embeddings filled with zeros so we can better use cores //zero out 4 cells at once, hence the >>2 half4 zeros; zeros.vals[0].x = __float2half(0); zeros.vals[0].y = __float2half(0); zeros.vals[1].x = __float2half(0); zeros.vals[1].y = __float2half(0); if (lane_id < (num_cols_after_padding >> 2)) { for (int i = num_rows; i < num_rows_after_padding; i++) { ((half4 *)(shmem + i * SMEM_STRIDE))[lane_id] 
= zeros; } } __syncwarp(); half *gmem_output = output + output_size * sample_id; //copy over bottom mlp into output memory if (lane_id < (num_cols >> 2)) { ((float2 *)gmem_output)[lane_id] = ((float2 *)shmem)[lane_id]; } //compute the dot product wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[M_BLOCKS][M_BLOCKS]; for (int i = 0; i < M_BLOCKS; i++) { for (int j = 0; j < M_BLOCKS; j++) { wmma::fill_fragment(acc[i][j], 0); } } for (int k_step = 0; k_step < num_col_steps; k_step++) { wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[M_BLOCKS]; wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::col_major> b[M_BLOCKS]; for (int j = 0; j < M_BLOCKS; j++) { int base_row = (j < M_BLOCKS - 1) ? j * 16 : smem_rows_per_warp - 16; const half *tile_ptr = shmem + (base_row * SMEM_STRIDE + k_step * 16); wmma::load_matrix_sync(a[j], tile_ptr, SMEM_STRIDE); wmma::load_matrix_sync(b[j], tile_ptr, SMEM_STRIDE); } for (int i = 0; i < M_BLOCKS; i++) { for (int j = 0; j < M_BLOCKS; j++) { wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]); } } } float *shmem_store = reinterpret_cast<float *>(shmem); for (int i = 0; i < M_BLOCKS; i++) { for (int j = 0; j < M_BLOCKS; j++) { float *tile_ptr = shmem_store + (i * 16 * SMEM_STRIDE_ACC + j * 16); wmma::store_matrix_sync(tile_ptr, acc[i][j], SMEM_STRIDE_ACC, wmma::mem_row_major); } } // skip over the part where we copied the bottom_mlp_output half *gmem_interact_output = gmem_output + num_cols; // copy over the dot product result into the output int lastRowBlockOffset = M_BLOCKS * 16 - smem_rows_per_warp; int srcLine = 0; for (int i = 0; i < num_rows; ++i, ++srcLine) { if (i == ((M_BLOCKS - 1) * 16)) { srcLine += lastRowBlockOffset; } if (lane_id < i) { //this assumes we have num_categorical_features (num_rows-1)<WARP_SIZE uint offset = (i * (i - 1)) >> 1; gmem_interact_output[offset + lane_id] = __float2half(shmem_store[srcLine * SMEM_STRIDE_ACC + lane_id]); } } // Add padding to the output vectors if (lane_id < padding_size) { gmem_output[output_size - lane_id - 1] = __float2half(0); } } inline void dotBasedInteractFwd( const void *input, const void *bottom_mlp_output, void *output, uint batch_size, uint num_rows, uint num_cols) { const uint kWarpSize = 32; const uint kWarpSizeLog2 = Log2<kWarpSize>::value; const uint kTileDim = 16; const uint kTileDimLog2 = Log2<kTileDim>::value; const uint warps_per_threadblock = 4; const uint threadblock_size = warps_per_threadblock * 32; const uint kRowTilesPerStep = 2; const uint kColTilesPerStep = 1; // num tiles uint num_row_tiles = (num_rows + kTileDim - 1) >> kTileDimLog2; uint num_col_tiles = (num_cols + kTileDim - 1) >> kTileDimLog2; // number of rows and columns after padding uint num_rows_after_padding = kTileDim << 1; //32 rows uint num_cols_after_padding = num_col_tiles << kTileDimLog2; //num cols rounded up to 16 uint num_row_steps = num_row_tiles / kRowTilesPerStep; uint num_col_steps = num_col_tiles / kColTilesPerStep; const uint K_BLOCKS = 8; const uint M_BLOCKS = 2; const uint SKEW_HALF = ((K_BLOCKS % 2) == 0) ? 8 : 0; const uint SMEM_STRIDE = (K_BLOCKS * 16 + SKEW_HALF); // multiple of 2 to guarantee 256-bit alignment for start of the row, at least 16 to safeload a tile const uint smem_rows_per_warp = M_BLOCKS << 4; const uint smem_elems_per_warp_mat = smem_rows_per_warp * SMEM_STRIDE; const uint SKEW_HALF_ACC = ((M_BLOCKS % 2) == 0) ? 
8 : 0; const uint SMEM_STRIDE_ACC = (M_BLOCKS * 16 + SKEW_HALF_ACC); const uint smem_elems_per_warp_acc = M_BLOCKS * 16 * SMEM_STRIDE_ACC * 2; // output in FP32 const uint smem_elems_per_warp = (smem_elems_per_warp_mat > smem_elems_per_warp_acc) ? smem_elems_per_warp_mat : smem_elems_per_warp_acc; uint raw_output_size = ((num_rows * (num_rows - 1)) >> 1) + num_cols; uint output_size = ((raw_output_size-1)/8 + 1)*8; //round up to multiple of 8 uint padding_size = output_size-raw_output_size; bool float4_predicate = !((num_cols & 7) || (output_size & 7)); if (float4_predicate) { dotBasedInteractFwdKernel<warps_per_threadblock, threadblock_size, M_BLOCKS, K_BLOCKS, SMEM_STRIDE, SMEM_STRIDE_ACC, kWarpSize, kWarpSizeLog2, kTileDim, kTileDimLog2> <<<(batch_size + warps_per_threadblock - 1) / warps_per_threadblock, //each threadblock covers warps_per_threadblock samples, each warp covers a sample threadblock_size, warps_per_threadblock * smem_elems_per_warp * sizeof(__half), at::cuda::getCurrentCUDAStream()>>>((const __half *)input, (half *)output, batch_size, num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, smem_elems_per_warp, smem_rows_per_warp, output_size, num_row_steps, num_col_steps, padding_size); } else { dotBasedInteractFwdKernelNonAligned<warps_per_threadblock, threadblock_size, M_BLOCKS, K_BLOCKS, SMEM_STRIDE, SMEM_STRIDE_ACC, kWarpSize, kWarpSizeLog2, kTileDim, kTileDimLog2> <<<(batch_size + warps_per_threadblock - 1) / warps_per_threadblock, threadblock_size, warps_per_threadblock * smem_elems_per_warp * sizeof(__half), at::cuda::getCurrentCUDAStream()>>>((const __half *)input, (half *)output, batch_size, num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, smem_elems_per_warp, smem_rows_per_warp, output_size, num_row_steps, num_col_steps, padding_size); } }
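The host-side launcher above pads the per-sample output to a multiple of 8 half-precision elements; a small Python re-statement of that size arithmetic, using DLRM-like dimensions (26 categorical embeddings plus one bottom-MLP vector, 128 features each) as an example:

def interact_output_size(num_rows, num_cols):
    # raw output = bottom-MLP vector + strictly-lower-triangular dot products
    raw = (num_rows * (num_rows - 1)) // 2 + num_cols
    padded = ((raw - 1) // 8 + 1) * 8      # round up to a multiple of 8, as in dotBasedInteractFwd
    return raw, padded

print(interact_output_size(num_rows=27, num_cols=128))   # (479, 480): one padded zero per sample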
TensorFlow2/Segmentation/MaskRCNN/scripts
scripts
train
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script that simplifies model training followed by evaluation. """ import argparse import os import shutil import subprocess from pathlib import Path LOCK_FILE = Path('/tmp/mrcnn_tf2.lock') class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter): pass if __name__ == '__main__': # CLI flags # noinspection PyTypeChecker parser = argparse.ArgumentParser( description=( 'NVIDIA MaskRCNN TF2 train' '\n\nNote: Any additional flags not specified below will be passed to main.py' ), formatter_class=lambda prog: CustomFormatter(prog, max_help_position=100) ) parser.add_argument('--gpus', type=int, metavar='N', help='Number of GPU\'s. Defaults to all available') parser.add_argument('--batch_size', type=int, metavar='N', default=4, help='Batch size used during training') parser.add_argument('--amp', action='store_true', help='Enable automatic mixed precision') parser.add_argument('--no_xla', action='store_true', help='Disables XLA - accelerated linear algebra') parser.add_argument('--data_dir', type=str, metavar='DIR', default='/data', help='Input directory containing the dataset') parser.add_argument('--weights_dir', type=str, metavar='DIR', default='/weights', help='Directory containing pre-trained resnet weights') parser.add_argument('--slurm_lock', action='store_true', help='Prevent this script from being launched multiple times when used in multi-gpu slurm setup') parser.add_argument('--no_eval', action='store_true', help='Disables evaluation after training.') flags, remainder = parser.parse_known_args() main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../main.py')) checkpoint_path = os.path.join(flags.weights_dir, "rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp") # build commands cmd_train = ( f'python {main_path}' f' train' f' --data_dir "{flags.data_dir}"' f' --backbone_checkpoint "{checkpoint_path}"' f' --train_batch_size {flags.batch_size}' ) cmd_eval = ( f'python {main_path}' f' eval' f' --data_dir "{flags.data_dir}"' f' --eval_file "{os.path.join(flags.data_dir, "annotations/instances_val2017.json")}"' ) if not flags.no_xla: cmd_train += ' --xla' cmd_eval += ' --xla' if flags.amp: cmd_train += ' --amp' cmd_eval += ' --amp' if remainder: cmd_train += ' ' + ' '.join(remainder) cmd_eval += ' ' + ' '.join(remainder) if flags.gpus is not None: cmd_train = f'CUDA_VISIBLE_DEVICES={",".join(map(str, range(flags.gpus)))} ' + cmd_train # print command line = '-' * shutil.get_terminal_size()[0] print(line, cmd_train, line, sep='\n', flush=True) # acquire lock if --slurm_lock is provided try: flags.slurm_lock and LOCK_FILE.touch(exist_ok=False) except FileExistsError: print(f'Failed to acquire lock ({LOCK_FILE}) - skipping') exit(0) # run training code = subprocess.call(cmd_train, shell=True) # evaluation if not code and not flags.no_eval: print(line, cmd_eval, line, sep='\n', flush=True) code = subprocess.call(cmd_eval, shell=True) flags.slurm_lock 
and LOCK_FILE.unlink() exit(code)
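For illustration, the command string the wrapper above assembles for a hypothetical `--gpus 8 --amp` invocation with default flags; the resolved main.py location and the data/weights paths are placeholders:

gpus, batch_size = 8, 4
cmd_train = (
    f'CUDA_VISIBLE_DEVICES={",".join(map(str, range(gpus)))} '
    f'python /workspace/mrcnn_tf2/main.py'        # placeholder for the resolved main.py path
    f' train'
    f' --data_dir "/data"'
    f' --backbone_checkpoint "/weights/rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp"'
    f' --train_batch_size {batch_size}'
    f' --xla --amp'
)
print(cmd_train)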
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util
util
timedObject
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_TIMEDOBJECT_H #define TT2I_TIMEDOBJECT_H #include "componentTiming.h" #include "timer.h" #include <string> namespace tts { class TimedObject { public: /** * @brief Create a new timed object. * * @param name The name of the object. */ TimedObject(const std::string& name) : mName(name) , mTimer() , mChildren() { // do nothing } /** * @brief Virtual destructor. */ virtual ~TimedObject() = default; /** * @brief Get the timing of this current object (and all of its children). * * @return The timing. */ virtual ComponentTiming getTiming() const { ComponentTiming time(mName, mTimer.poll()); for (const TimedObject* const child : mChildren) { time.addSubTiming(child->getTiming()); } return time; } /** * @brief Reset the timing of the current object (and all of its children). */ void resetTiming() { mTimer.reset(); for (TimedObject* const child : mChildren) { child->resetTiming(); } } /** * @brief Print the timing of the current object (and all of its children) * to the given stream. * * @param stream The stream to print to. * @param numRuns The number of runs to average the times over. */ void printTiming(std::ostream& stream, const int numRuns = 1) const { getTiming().print(stream, numRuns); } protected: /** * @brief Add a child object. The child object must remain at this memory * location until this object is destroyed. * * @param child The child object. */ void addChild(TimedObject* const child) { mChildren.emplace_back(child); } /** * @brief Start the internal timer. */ void startTiming() { mTimer.start(); } /** * @brief Stop the internal timer. */ void stopTiming() { mTimer.stop(); } private: std::string mName; Timer mTimer; std::vector<TimedObject*> mChildren; }; } // namespace tts #endif
PyTorch/SpeechRecognition/Jasper/utils
utils
convert_librispeech
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/env python import argparse import os import glob import multiprocessing import json import pandas as pd from preprocessing_utils import parallel_preprocess parser = argparse.ArgumentParser(description='Preprocess LibriSpeech.') parser.add_argument('--input_dir', type=str, required=True, help='LibriSpeech collection input dir') parser.add_argument('--dest_dir', type=str, required=True, help='Output dir') parser.add_argument('--output_json', type=str, default='./', help='name of the output json file.') parser.add_argument('-s','--speed', type=float, nargs='*', help='Speed perturbation ratio') parser.add_argument('--target_sr', type=int, default=None, help='Target sample rate. ' 'defaults to the input sample rate') parser.add_argument('--overwrite', action='store_true', help='Overwrite file if exists') parser.add_argument('--parallel', type=int, default=multiprocessing.cpu_count(), help='Number of threads to use when processing audio files') args = parser.parse_args() args.input_dir = args.input_dir.rstrip('/') args.dest_dir = args.dest_dir.rstrip('/') def build_input_arr(input_dir): txt_files = glob.glob(os.path.join(input_dir, '**', '*.trans.txt'), recursive=True) input_data = [] for txt_file in txt_files: rel_path = os.path.relpath(txt_file, input_dir) with open(txt_file) as fp: for line in fp: fname, _, transcript = line.partition(' ') input_data.append(dict(input_relpath=os.path.dirname(rel_path), input_fname=fname+'.flac', transcript=transcript)) return input_data print("[%s] Scaning input dir..." % args.output_json) dataset = build_input_arr(input_dir=args.input_dir) print("[%s] Converting audio files..." % args.output_json) dataset = parallel_preprocess(dataset=dataset, input_dir=args.input_dir, dest_dir=args.dest_dir, target_sr=args.target_sr, speed=args.speed, overwrite=args.overwrite, parallel=args.parallel) print("[%s] Generating json..." % args.output_json) df = pd.DataFrame(dataset, dtype=object) # Save json with python. df.to_json() produces back slashed in file paths dataset = df.to_dict(orient='records') with open(args.output_json, 'w') as fp: json.dump(dataset, fp, indent=2)
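Each line of a LibriSpeech *.trans.txt file ('<utterance-id> <transcript>') becomes one record before audio conversion. A minimal illustration of the parsing done in build_input_arr above (the utterance id and transcript are made-up examples):

line = '84-121123-0000 GO DO YOU HEAR\n'
fname, _, transcript = line.partition(' ')
record = dict(input_relpath='84/121123',          # directory of the .trans.txt file, relative to --input_dir
              input_fname=fname + '.flac',
              transcript=transcript)
# {'input_relpath': '84/121123', 'input_fname': '84-121123-0000.flac',
#  'transcript': 'GO DO YOU HEAR\n'}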
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/losses
losses
weighted_sparse_categorical_crossentropy
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Sparse categorical cross-entropy losses.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf def _adjust_labels(labels, predictions): """Adjust the 'labels' tensor by squeezing it if needed.""" labels = tf.cast(labels, tf.int32) if len(predictions.shape) == len(labels.shape): labels = tf.squeeze(labels, [-1]) return labels, predictions def _validate_rank(labels, predictions, weights): if weights is not None and len(weights.shape) != len(labels.shape): raise RuntimeError( ("Weight and label tensors were not of the same rank. weights.shape " "was %s, and labels.shape was %s.") % (predictions.shape, labels.shape)) if (len(predictions.shape) - 1) != len(labels.shape): raise RuntimeError( ("Weighted sparse categorical crossentropy expects `labels` to have a " "rank of one less than `predictions`. labels.shape was %s, and " "predictions.shape was %s.") % (labels.shape, predictions.shape)) def per_example_loss(labels, predictions, weights=None): """Calculate a per-example sparse categorical crossentropy loss. This loss function assumes that the predictions are post-softmax. Args: labels: The labels to evaluate against. Should be a set of integer indices ranging from 0 to (vocab_size-1). predictions: The network predictions. Should have softmax already applied. weights: An optional weight array of the same shape as the 'labels' array. If None, all examples will be used. Returns: A tensor of shape predictions.shape[:-1] containing the per-example loss. """ # When using these functions with the Keras core API, we will need to squeeze # the labels tensor - Keras adds a spurious inner dimension. labels, predictions = _adjust_labels(labels, predictions) _validate_rank(labels, predictions, weights) labels_one_hot = tf.keras.backend.one_hot(labels, predictions.shape[-1]) labels_one_hot = tf.keras.backend.cast(labels_one_hot, predictions.dtype) per_example_loss_data = -tf.keras.backend.sum( predictions * labels_one_hot, axis=[-1]) if weights is not None: weights = tf.keras.backend.cast(weights, per_example_loss_data.dtype) per_example_loss_data = weights * per_example_loss_data return per_example_loss_data def loss(labels, predictions, weights=None): """Calculate a per-batch sparse categorical crossentropy loss. This loss function assumes that the predictions are post-softmax. Args: labels: The labels to evaluate against. Should be a set of integer indices ranging from 0 to (vocab_size-1). predictions: The network predictions. Should have softmax already applied. weights: An optional weight array of the same shape as the 'labels' array. If None, all examples will be used. Returns: A loss scalar. Raises: RuntimeError if the passed tensors do not have the same rank. 
""" # When using these functions with the Keras core API, we will need to squeeze # the labels tensor - Keras adds a spurious inner dimension. labels, predictions = _adjust_labels(labels, predictions) _validate_rank(labels, predictions, weights) per_example_loss_data = per_example_loss(labels, predictions, weights) if weights is None: return tf.keras.backend.mean(per_example_loss_data) else: numerator = tf.keras.backend.sum(per_example_loss_data) weights = tf.keras.backend.cast(weights, predictions.dtype) denominator = tf.keras.backend.sum(weights) + 1e-5 return numerator / denominator
TensorFlow/Detection/SSD/models/research/object_detection/core
core
balanced_positive_negative_sampler
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class to subsample minibatches by balancing positives and negatives. Subsamples minibatches based on a pre-specified positive fraction in range [0,1]. The class presumes there are many more negatives than positive examples: if the desired batch_size cannot be achieved with the pre-specified positive fraction, it fills the rest with negative examples. If this is not sufficient for obtaining the desired batch_size, it returns fewer examples. The main function to call is Subsample(self, indicator, labels). For convenience one can also call SubsampleWeights(self, weights, labels) which is defined in the minibatch_sampler base class. When is_static is True, it implements a method that guarantees static shapes. It also ensures the length of output of the subsample is always batch_size, even when number of examples set to True in indicator is less than batch_size. """ import tensorflow as tf from object_detection.core import minibatch_sampler from object_detection.utils import ops class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler): """Subsamples minibatches to a desired balance of positives and negatives.""" def __init__(self, positive_fraction=0.5, is_static=False): """Constructs a minibatch sampler. Args: positive_fraction: desired fraction of positive examples (scalar in [0,1]) in the batch. is_static: If True, uses an implementation with static shape guarantees. Raises: ValueError: if positive_fraction < 0, or positive_fraction > 1 """ if positive_fraction < 0 or positive_fraction > 1: raise ValueError('positive_fraction should be in range [0,1]. ' 'Received: %s.' % positive_fraction) self._positive_fraction = positive_fraction self._is_static = is_static def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size): """Counts the number of positives and negatives numbers to be sampled. Args: sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains the signed indices of the examples where the sign is based on the label value. The examples that cannot be sampled are set to 0. It samples atmost sample_size*positive_fraction positive examples and remaining from negative examples. sample_size: Size of subsamples. Returns: A tuple containing the number of positive and negative labels in the subsample. 
""" input_length = tf.shape(sorted_indices_tensor)[0] valid_positive_index = tf.greater(sorted_indices_tensor, tf.zeros(input_length, tf.int32)) num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32)) max_num_positive_samples = tf.constant( int(sample_size * self._positive_fraction), tf.int32) num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos) num_negative_samples = tf.constant(sample_size, tf.int32) - num_positive_samples return num_positive_samples, num_negative_samples def _get_values_from_start_and_end(self, input_tensor, num_start_samples, num_end_samples, total_num_samples): """slices num_start_samples and last num_end_samples from input_tensor. Args: input_tensor: An int32 tensor of shape [N] to be sliced. num_start_samples: Number of examples to be sliced from the beginning of the input tensor. num_end_samples: Number of examples to be sliced from the end of the input tensor. total_num_samples: Sum of is num_start_samples and num_end_samples. This should be a scalar. Returns: A tensor containing the first num_start_samples and last num_end_samples from input_tensor. """ input_length = tf.shape(input_tensor)[0] start_positions = tf.less(tf.range(input_length), num_start_samples) end_positions = tf.greater_equal( tf.range(input_length), input_length - num_end_samples) selected_positions = tf.logical_or(start_positions, end_positions) selected_positions = tf.cast(selected_positions, tf.float32) indexed_positions = tf.multiply(tf.cumsum(selected_positions), selected_positions) one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1, total_num_samples, dtype=tf.float32) return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32), one_hot_selector, axes=[0, 0]), tf.int32) def _static_subsample(self, indicator, batch_size, labels): """Returns subsampled minibatch. Args: indicator: boolean tensor of shape [N] whose True entries can be sampled. N should be a complie time constant. batch_size: desired batch size. This scalar cannot be None. labels: boolean tensor of shape [N] denoting positive(=True) and negative (=False) examples. N should be a complie time constant. Returns: sampled_idx_indicator: boolean tensor of shape [N], True for entries which are sampled. It ensures the length of output of the subsample is always batch_size, even when number of examples set to True in indicator is less than batch_size. Raises: ValueError: if labels and indicator are not 1D boolean tensors. """ # Check if indicator and labels have a static size. if not indicator.shape.is_fully_defined(): raise ValueError('indicator must be static in shape when is_static is' 'True') if not labels.shape.is_fully_defined(): raise ValueError('labels must be static in shape when is_static is' 'True') if not isinstance(batch_size, int): raise ValueError('batch_size has to be an integer when is_static is' 'True.') input_length = tf.shape(indicator)[0] # Set the number of examples set True in indicator to be at least # batch_size. num_true_sampled = tf.reduce_sum(tf.cast(indicator, tf.float32)) additional_false_sample = tf.less_equal( tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)), batch_size - num_true_sampled) indicator = tf.logical_or(indicator, additional_false_sample) # Shuffle indicator and label. Need to store the permutation to restore the # order post sampling. 
permutation = tf.random_shuffle(tf.range(input_length)) indicator = ops.matmul_gather_on_zeroth_axis( tf.cast(indicator, tf.float32), permutation) labels = ops.matmul_gather_on_zeroth_axis( tf.cast(labels, tf.float32), permutation) # index (starting from 1) when indicator is True, 0 when False indicator_idx = tf.where( tf.cast(indicator, tf.bool), tf.range(1, input_length + 1), tf.zeros(input_length, tf.int32)) # Replace -1 for negative, +1 for positive labels signed_label = tf.where( tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32), tf.scalar_mul(-1, tf.ones(input_length, tf.int32))) # negative of index for negative label, positive index for positive label, # 0 when indicator is False. signed_indicator_idx = tf.multiply(indicator_idx, signed_label) sorted_signed_indicator_idx = tf.nn.top_k( signed_indicator_idx, input_length, sorted=True).values [num_positive_samples, num_negative_samples] = self._get_num_pos_neg_samples( sorted_signed_indicator_idx, batch_size) sampled_idx = self._get_values_from_start_and_end( sorted_signed_indicator_idx, num_positive_samples, num_negative_samples, batch_size) # Shift the indices to start from 0 and remove any samples that are set as # False. sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32) sampled_idx = tf.multiply( tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32), sampled_idx) sampled_idx_indicator = tf.cast(tf.reduce_sum( tf.one_hot(sampled_idx, depth=input_length), axis=0), tf.bool) # project back the order based on stored permutations reprojections = tf.one_hot(permutation, depth=input_length, dtype=tf.float32) return tf.cast(tf.tensordot( tf.cast(sampled_idx_indicator, tf.float32), reprojections, axes=[0, 0]), tf.bool) def subsample(self, indicator, batch_size, labels, scope=None): """Returns subsampled minibatch. Args: indicator: boolean tensor of shape [N] whose True entries can be sampled. batch_size: desired batch size. If None, keeps all positive samples and randomly selects negative samples so that the positive sample fraction matches self._positive_fraction. It cannot be None is is_static is True. labels: boolean tensor of shape [N] denoting positive(=True) and negative (=False) examples. scope: name scope. Returns: sampled_idx_indicator: boolean tensor of shape [N], True for entries which are sampled. Raises: ValueError: if labels and indicator are not 1D boolean tensors. """ if len(indicator.get_shape().as_list()) != 1: raise ValueError('indicator must be 1 dimensional, got a tensor of ' 'shape %s' % indicator.get_shape()) if len(labels.get_shape().as_list()) != 1: raise ValueError('labels must be 1 dimensional, got a tensor of ' 'shape %s' % labels.get_shape()) if labels.dtype != tf.bool: raise ValueError('labels should be of type bool. Received: %s' % labels.dtype) if indicator.dtype != tf.bool: raise ValueError('indicator should be of type bool. 
Received: %s' % indicator.dtype) with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'): if self._is_static: return self._static_subsample(indicator, batch_size, labels) else: # Only sample from indicated samples negative_idx = tf.logical_not(labels) positive_idx = tf.logical_and(labels, indicator) negative_idx = tf.logical_and(negative_idx, indicator) # Sample positive and negative samples separately if batch_size is None: max_num_pos = tf.reduce_sum(tf.to_int32(positive_idx)) else: max_num_pos = int(self._positive_fraction * batch_size) sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos) num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32)) if batch_size is None: negative_positive_ratio = ( 1 - self._positive_fraction) / self._positive_fraction max_num_neg = tf.to_int32( negative_positive_ratio * tf.to_float(num_sampled_pos)) else: max_num_neg = batch_size - num_sampled_pos sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg) return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
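A hedged usage sketch of the sampler above; it assumes the TF1 graph/session style used throughout this codebase, and the indicator/label values are arbitrary examples.

# Illustrative sketch; assumes TF1 graph mode (tf.Session).
import tensorflow as tf
from object_detection.core import balanced_positive_negative_sampler

indicator = tf.constant([True, True, True, True, True, False])  # entries allowed to be sampled
labels = tf.constant([True, False, True, False, False, False])  # positive / negative flags

sampler = balanced_positive_negative_sampler.BalancedPositiveNegativeSampler(positive_fraction=0.5)
sampled_mask = sampler.subsample(indicator, batch_size=4, labels=labels)

with tf.Session() as sess:
    print(sess.run(sampled_mask))  # boolean mask selecting up to 2 positives, rest negatives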
TensorFlow/Classification/ConvNets/triton/deployment_toolkit
deployment_toolkit
dump
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import json import pickle import threading from pathlib import Path from typing import Dict, Iterator, List, Union import numpy as np MB2B = 2 ** 20 B2MB = 1 / MB2B FLUSH_THRESHOLD_B = 256 * MB2B def _validate_batch(name: str, value: Union[list, np.ndarray]): if not isinstance(value, (list, np.ndarray)): raise ValueError(f"Values shall be lists or np.ndarrays; current type {type(value)}") def _validate_prefix_data(prefix_data: Dict[str, List[np.ndarray]]): batch_sizes_per_io_name = {name: [len(batch) for batch in batches] for name, batches in prefix_data.items()} names = list(batch_sizes_per_io_name) for io_name in names: for batch_idx, batch_size in enumerate(batch_sizes_per_io_name[io_name]): if not all([batch_sizes_per_io_name[other_name][batch_idx] == batch_size for other_name in names]): non_equal_batch_sizes = { other_name: batch_sizes_per_io_name[other_name][batch_idx] for other_name in names } non_equal_batch_sizes_str = ", ".join( [f"{name}={batch_size}" for name, batch_size in non_equal_batch_sizes.items()] ) raise ValueError( "All inputs/outputs should have same number of batches with equal batch_size. " f"At batch_idx={batch_idx} there are batch_sizes: {non_equal_batch_sizes_str}" ) # ensure if each io has same number of batches with equal size def _get_nitems_and_batches(prefix_data: Dict[str, List[np.ndarray]]): nitems = 0 nbatches = 0 if prefix_data: nitems_per_io_name = {name: sum(len(batch) for batch in batches) for name, batches in prefix_data.items()} nbatches_per_io_name = {name: len(batches) for name, batches in prefix_data.items()} nitems = list(nitems_per_io_name.values())[0] nbatches = list(nbatches_per_io_name.values())[0] return nitems, nbatches class BaseDumpWriter(abc.ABC): FILE_SUFFIX = ".abstract" def __init__(self, output_dir: Union[str, Path]): self._output_dir = Path(output_dir) # outer dict key is prefix (i.e. 
input/output/labels/...), inner dict key is input/output name # list is list of batches self._items_cache: Dict[str, Dict[str, List[np.ndarray]]] = {} # key is prefix self._items_counters: Dict[str, int] = {} self._cache_lock = threading.RLock() self._flush_threshold_b = FLUSH_THRESHOLD_B @property def cache_size(self): def _get_bytes_size(name, batch): _validate_batch(name, batch) if not isinstance(batch, np.ndarray): batch = np.narray(batch) return batch.nbytes with self._cache_lock: return { prefix: sum(_get_bytes_size(name, batch) for name, batches in data.items() for batch in batches) for prefix, data in self._items_cache.items() } def _append_to_cache(self, prefix, prefix_data): if prefix_data is None: return if not isinstance(prefix_data, dict): raise ValueError(f"{prefix} data to store shall be dict") with self._cache_lock: cached_prefix_data = self._items_cache.setdefault(prefix, {}) for name, batch in prefix_data.items(): _validate_batch(name, batch) if not isinstance(batch, np.ndarray): batch = np.array(batch) cached_batches = cached_prefix_data.setdefault(name, []) cached_batches += [batch] def write(self, **kwargs): with self._cache_lock: for prefix, prefix_data in kwargs.items(): self._append_to_cache(prefix, prefix_data) biggest_prefix_data_size = max(self.cache_size.values()) if biggest_prefix_data_size > self._flush_threshold_b: self.flush() def flush(self): with self._cache_lock: for prefix, prefix_data in self._items_cache.items(): _validate_prefix_data(prefix_data) output_path = self._output_dir / self._get_filename(prefix) self._dump(prefix_data, output_path) nitems, nbatches = _get_nitems_and_batches(prefix_data) self._items_counters[prefix] += nitems self._items_cache = {} def _get_filename(self, prefix): idx = self._items_counters.setdefault(prefix, 0) return f"{prefix}-{idx:012d}{self.FILE_SUFFIX}" @abc.abstractmethod def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path): pass def __enter__(self): if self._output_dir.exists() and len(list(self._output_dir.iterdir())): raise ValueError(f"{self._output_dir.as_posix()} is not empty") self._output_dir.mkdir(parents=True, exist_ok=True) return self def __exit__(self, exc_type, exc_val, exc_tb): self.flush() class PickleDumpWriter(BaseDumpWriter): FILE_SUFFIX = ".pkl" def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path): output_path.parent.mkdir(parents=True, exist_ok=True) with output_path.open("wb") as pickle_file: pickle.dump(prefix_data, pickle_file) class JsonDumpWriter(BaseDumpWriter): FILE_SUFFIX = ".json" def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path): repacked_prefix_data = self._format_data(prefix_data) output_path.parent.mkdir(parents=True, exist_ok=True) with output_path.open("w") as json_file: json.dump(repacked_prefix_data, json_file) def _format_data(self, prefix_data: Dict[str, List[np.ndarray]]) -> Dict: def _format_batch_for_perf_analyzer_json_format(batch: np.ndarray): return { "content": batch.flatten().tolist(), "shape": list(batch.shape), "dtype": str(batch.dtype), } _, nbatches = _get_nitems_and_batches(prefix_data) batches = [{} for _ in range(nbatches)] for io_name, batches_per_io in prefix_data.items(): for batch_idx, batch in enumerate(batches_per_io): batches[batch_idx][io_name] = _format_batch_for_perf_analyzer_json_format(batch) return {"data": batches} class BaseDumpReader(abc.ABC): FILE_SUFFIX = ".abstract" def __init__(self, dump_dir: Union[Path, str]): self._dump_dir = Path(dump_dir) def get(self, prefix: str) 
-> Iterator[Dict[str, np.ndarray]]: dump_files_paths = sorted(self._dump_dir.glob(f"{prefix}*{self.FILE_SUFFIX}")) for dump_file_path in dump_files_paths: prefix_data = self._load_file(dump_file_path) nitems, nbatches = _get_nitems_and_batches(prefix_data) for batch_idx in range(nbatches): yield {io_name: prefix_data[io_name][batch_idx] for io_name in prefix_data} @abc.abstractmethod def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]: pass def iterate_over(self, prefix_list: List[str]) -> Iterator: iterators = [self.get(prefix) for prefix in prefix_list] empty_iterators = [False] * len(iterators) while not all(empty_iterators): values = [None] * len(iterators) for idx, iterator in enumerate(iterators): if empty_iterators[idx]: continue try: values[idx] = next(iterator) except StopIteration: empty_iterators[idx] = True if all(empty_iterators): break if not all(empty_iterators): yield values def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): pass class PickleDumpReader(BaseDumpReader): FILE_SUFFIX = ".pkl" def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]: with dump_file_path.open("rb") as pickle_file: return pickle.load(pickle_file) class JsonDumpReader(BaseDumpReader): FILE_SUFFIX = ".json" def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]: with dump_file_path.open("rb") as json_file: data = json.load(json_file) return self._repack_data(data) def _repack_data(self, data: Dict) -> Dict[str, List[np.ndarray]]: result: Dict[str, List[np.ndarray]] = {} batches = data["data"] for batch in batches: for io_name, batch_as_dict in batch.items(): io_batches = result.setdefault(io_name, []) flat_array = batch_as_dict["content"] shape = batch_as_dict["shape"] dtype = batch_as_dict["dtype"] batch_as_array = np.array(flat_array).reshape(shape).astype(dtype) io_batches.append(batch_as_array) return result
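A minimal usage sketch of the JSON writer/reader pair defined above, with the classes assumed to be in scope; the dump directory, prefix names, and array contents are made-up examples.

# Minimal usage sketch; directory, prefixes, and arrays are illustrative.
import numpy as np

with JsonDumpWriter("./dumps") as writer:
    writer.write(
        inputs={"input__0": np.zeros((8, 16), dtype=np.float32)},
        outputs={"output__0": np.ones((8, 4), dtype=np.float32)},
    )

with JsonDumpReader("./dumps") as reader:
    for batch in reader.get("outputs"):
        print({name: arr.shape for name, arr in batch.items()})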
TensorFlow2/Detection/Efficientdet/scripts/D0
D0
inference
#!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

mkdir -p /tmp/inference

python3 infer.py \
  --image_path=testdata/img1.jpg \
  --output_dir=/tmp/inference \
  --model_dir=${MODEL_DIR} \
  2>&1 | tee /tmp/inference/infer.log
PyTorch/SpeechRecognition/Jasper/scripts/docker
docker
launch
#!/bin/bash

SCRIPT_DIR=$(cd $(dirname $0); pwd)

: ${JASPER_REPO:="$SCRIPT_DIR/../.."}
: ${DATA_DIR:=${1:-"$JASPER_REPO/datasets"}}
: ${CHECKPOINT_DIR:=${2:-"$JASPER_REPO/checkpoints"}}
: ${OUTPUT_DIR:=${3:-"$JASPER_REPO/results"}}
: ${SCRIPT:=${4:-}}

mkdir -p $DATA_DIR
mkdir -p $CHECKPOINT_DIR
mkdir -p $OUTPUT_DIR

MOUNTS=""
MOUNTS+=" -v $DATA_DIR:/datasets"
MOUNTS+=" -v $CHECKPOINT_DIR:/checkpoints"
MOUNTS+=" -v $OUTPUT_DIR:/results"
MOUNTS+=" -v $JASPER_REPO:/workspace/jasper"

echo $MOUNTS

docker run -it --rm --gpus all \
  --env PYTHONDONTWRITEBYTECODE=1 \
  --ipc=host \
  --ulimit memlock=-1 \
  --ulimit stack=67108864 \
  $MOUNTS \
  $EXTRA_JASPER_ENV \
  -w /workspace/jasper \
  jasper:latest bash $SCRIPT
PaddlePaddle/LanguageModeling/BERT
BERT
modeling
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import copy from dataclasses import dataclass import logging import paddle import paddle.nn as nn try: from paddle.incubate.nn import FusedTransformerEncoderLayer except ImportError: FusedTransformerEncoderLayer = None __all__ = [ 'BertModel', 'BertForPretraining', 'BertPretrainingHeads', 'BertForQuestionAnswering' ] @dataclass class BertConfig: vocab_size: int = 30528 hidden_size: int = 768 num_hidden_layers: int = 12 num_attention_heads: int = 12 intermediate_size: int = 3072 hidden_act: str = "gelu" hidden_dropout_prob: float = 0.1 attention_probs_dropout_prob: float = 0.1 max_position_embeddings: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 output_all_encoded_layers: bool = False pad_token_id: int = 0 @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig() for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" class BertEmbeddings(nn.Layer): """ Include embeddings from word, position and token_type embeddings """ def __init__(self, bert_config): super().__init__() self.word_embeddings = nn.Embedding(bert_config.vocab_size, bert_config.hidden_size) self.position_embeddings = nn.Embedding( bert_config.max_position_embeddings, bert_config.hidden_size) self.token_type_embeddings = nn.Embedding(bert_config.type_vocab_size, bert_config.hidden_size) self.layer_norm = nn.LayerNorm(bert_config.hidden_size, epsilon=1e-12) self.dropout = nn.Dropout(bert_config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None): """ Args: See class `BertModel`. """ ones = paddle.ones_like(input_ids, dtype="int64") seq_length = paddle.cumsum(ones, axis=-1) position_ids = seq_length - ones position_ids.stop_gradient = True if token_type_ids is None: token_type_ids = paddle.zeros_like(input_ids, dtype="int64") input_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = input_embeddings + position_embeddings + token_type_embeddings embeddings = self.layer_norm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertPooler(nn.Layer): """ Pool the result of BertEncoder. 
""" def __init__(self, hidden_size, pool_act=nn.Tanh()): super().__init__() self.dense = nn.Linear(hidden_size, hidden_size) self.activation = pool_act def forward(self, hidden_states): """ Args: hidden_states(Tensor): A Tensor of hidden_states. """ first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertModel(nn.Layer): """ The bare BERT Model transformer outputting raw hidden-states. Args: bert_config(BertConfig): A BertConfig class instance with the configuration to build a new model """ def __init__(self, bert_config): super().__init__() self.bert_config = bert_config self.embeddings = BertEmbeddings(bert_config) self.fuse = True if FusedTransformerEncoderLayer is not None else False self.fuse = False if self.fuse: self.encoder = nn.LayerList([ FusedTransformerEncoderLayer( bert_config.hidden_size, bert_config.num_attention_heads, bert_config.intermediate_size, dropout_rate=bert_config.hidden_dropout_prob, activation=bert_config.hidden_act, attn_dropout_rate=bert_config.attention_probs_dropout_prob, act_dropout_rate=0.) for _ in range(bert_config.num_hidden_layers) ]) else: logging.warning( "FusedTransformerEncoderLayer is not supported by the running Paddle. " "TransformerEncoderLayer will be used.") encoder_layer = nn.TransformerEncoderLayer( bert_config.hidden_size, bert_config.num_attention_heads, bert_config.intermediate_size, dropout=bert_config.hidden_dropout_prob, activation=bert_config.hidden_act, attn_dropout=bert_config.attention_probs_dropout_prob, act_dropout=0, fuse_qkv=bert_config.fuse_mha) self.encoder = nn.TransformerEncoder(encoder_layer, bert_config.num_hidden_layers) self.pooler = BertPooler(bert_config.hidden_size) def forward(self, input_ids, token_type_ids=None, attention_mask=None): """ Args: input_ids(Tensor): A Tensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary. Data type should be `int64`. token_type_ids(Tensor, optional): An optional Tensor of shape [batch_size, sequence_length] with the token types indices selected in [0, type_vocab_size - 1]. If `type_vocab_size` is 2, indices can either be 0 or 1. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token. (see BERT paper for more details). Its data type should be `int64` Defaults: None, which means we don't add segment embeddings. attention_mask(Tensor, optional): An optional Tensor of shape [batch_size, sequence_length] with indices of mask used in multi-head attention to avoid performing attention on to some unwanted positions, usually the paddings or the subsequent positions. Its data type can be int, float and bool. When the data type is bool, the `masked` tokens have `False` values and the others have `True` values. When the data type is int, the `masked` tokens have `0` values and the others have `1` values. When the data type is float, the `masked` tokens have `-INF` values and the others have `0` values. Defaults: None. Returns: encoder_output(Tensor): A Tensor of shape [batch_size, sequence_length, hidden_size] contains hidden-states at the last layer of the model. The data type should be float32. pooled_output(Tensor): A Tensor of shape [batch_size, hidden_size] which is the output of a classifier pretrained on top of the hidden state associated to the first character of the input (`CLS`) to train on the Next-Sentence task (see BERT's paper). 
""" if attention_mask is None: attention_mask = paddle.unsqueeze( (input_ids != self.bert_config.pad_token_id).astype('int32'), axis=[1, 2]) else: if attention_mask.ndim == 2: # attention_mask [batch_size, sequence_length] -> [batch_size, 1, 1, sequence_length] attention_mask = attention_mask.unsqueeze(axis=[1, 2]) embedding_output = self.embeddings( input_ids=input_ids, token_type_ids=token_type_ids) if self.fuse: encoder_output = embedding_output for layer in self.encoder: encoder_output = layer(encoder_output, attention_mask) else: encoder_output = self.encoder(embedding_output, attention_mask) pooled_output = self.pooler(encoder_output) return encoder_output, pooled_output class BertForQuestionAnswering(nn.Layer): """ BERT model for Question Answering (span extraction). This module is composed of the BERT model with a linear layer on top of the sequence output that computes start_logits and end_logits Args: bert_config(BertConfig): a BertConfig class instance with the configuration to build a new model. """ def __init__(self, bert_config): super().__init__() self.bert = BertModel(bert_config) self.classifier = nn.Linear(bert_config.hidden_size, 2) def forward(self, input_ids, token_type_ids=None, attention_mask=None): """ Args: See class `BertModel`. Returns: start_logits(Tensor): A tensor of shape [batch_size, sequence_length] indicates the start position token. end_logits(Tensor): A tensor of shape [batch_size, sequence_length] indicates the end position token. """ encoder_output, _ = self.bert( input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) logits = self.classifier(encoder_output) logits = paddle.transpose(logits, perm=[2, 0, 1]) start_logits, end_logits = paddle.unstack(x=logits, axis=0) return start_logits, end_logits class BertLMPredictionHead(nn.Layer): """ Bert Model with a `language modeling` head on top for CLM fine-tuning. Args: hidden_size(int): See class `BertConfig`. vocab_size(int): See class `BertConfig`. activation(str): Activation function used in the language modeling task. embedding_weights(Tensor, optional): An optional Tensor of shape [vocab_size, hidden_size] used to map hidden_states to logits of the masked token prediction. The data type should be float32. Defaults: None, which means use the same weights of the embedding layer. """ def __init__(self, hidden_size, vocab_size, activation, embedding_weights=None): super().__init__() self.transform = nn.Linear(hidden_size, hidden_size) self.activation = getattr(nn.functional, activation) self.layer_norm = nn.LayerNorm(hidden_size, epsilon=1e-12) self.decoder_weight = self.create_parameter( shape=[vocab_size, hidden_size], dtype=self.transform.weight.dtype, is_bias=False) if embedding_weights is None else embedding_weights self.decoder_bias = self.create_parameter( shape=[vocab_size], dtype=self.decoder_weight.dtype, is_bias=True) def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = paddle.tensor.matmul( hidden_states, self.decoder_weight, transpose_y=True) + self.decoder_bias return hidden_states class BertPretrainingHeads(nn.Layer): """ Perform language modeling task and next sentence classification task. Args: hidden_size(int): See class `BertConfig`. vocab_size(int): See class `BertConfig`. activation(str): Activation function used in the language modeling task. 
embedding_weights (Tensor, optional): An optional Tensor of shape [vocab_size, hidden_size] used to map hidden_states to logits of the masked token prediction. The data type should be float32. Defaults: None, which means use the same weights of the embedding layer. """ def __init__(self, hidden_size, vocab_size, activation, embedding_weights=None): super().__init__() self.predictions = BertLMPredictionHead(hidden_size, vocab_size, activation, embedding_weights) self.seq_relationship = nn.Linear(hidden_size, 2) def forward(self, encoder_output, pooled_output, masked_lm_labels): """ Args: sequence_output(Tensor): A Tensor of shape [batch_size, sequence_length, hidden_size] with hidden-states at the last layer of bert model. It's data type should be float32. pooled_output(Tensor): A Tensor of shape [batch_size, hidden_size] with output of first token (`[CLS]`) in sequence. We "pool" the model by simply taking the hidden state corresponding to the first token. Its data type should be float32. masked_positions(Tensor, optional): An optional tensor of shape [batch_size, mask_token_num] indicates positions to be masked in the position embedding. Its data type should be int64. Default: None Returns: prediction_scores(Tensor): A Tensor with the scores of masked token prediction. Its data type should be float32. If `masked_positions` is None, its shape is [batch_size, sequence_length, vocab_size]. Otherwise, the shape is [batch_size, mask_token_num, vocab_size]. seq_relationship_score(Tensor): A Tensor of shape [batch_size, 2] with the scores of next sentence prediction. Its data type should be float32. """ sequence_flattened = paddle.index_select( encoder_output.reshape([-1, encoder_output.shape[-1]]), paddle.nonzero(masked_lm_labels.reshape([-1]) != -1).squeeze(), axis=0) prediction_scores = self.predictions(sequence_flattened) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertForPretraining(nn.Layer): """ Bert Model with pretraining tasks on top. Args: bert_config(Class BertConfig): An instance of class `BertConfig`. """ def __init__(self, bert_config): super().__init__() self.bert = BertModel(bert_config) self.cls = BertPretrainingHeads( bert_config.hidden_size, bert_config.vocab_size, bert_config.hidden_act, embedding_weights=self.bert.embeddings.word_embeddings.weight) def forward(self, input_ids, token_type_ids, attention_mask, masked_lm_labels): """ Args: input_ids(Tensor): See class `BertModel`. token_type_ids(Tensor, optional): See class `BertModel`. attention_mask(Tensor, optional): See class `BertModel`. masked_positions(Tensor, optional): See class `BertPretrainingHeads`. Returns: prediction_scores(Tensor): A Tensor with the scores of masked token prediction. Its data type should be float32. If `masked_positions` is None, its shape is [batch_size, sequence_length, vocab_size]. Otherwise, its shape is [batch_size, mask_token_num, vocab_size]. seq_relationship_score(Tensor): A Tensor of shape [batch_size, 2] with the scores of next sentence prediction. Its data type should be float32. """ with paddle.static.amp.fp16_guard(): outputs = self.bert( input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls( sequence_output, pooled_output, masked_lm_labels) return prediction_scores, seq_relationship_score
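A rough sketch of running the bare encoder defined above. This assumes the customized Paddle build this example targets (stock paddle.nn.TransformerEncoderLayer has no fuse_qkv argument) and that fuse_mha is populated on the config, as the training scripts are expected to do elsewhere; batch size, sequence length, and config values are illustrative.

# Rough sketch only; see the assumptions noted above.
import paddle

bert_config = BertConfig(num_hidden_layers=2)  # small, illustrative configuration
bert_config.fuse_mha = False                   # attribute read when building the encoder layers

model = BertModel(bert_config)
input_ids = paddle.randint(low=1, high=bert_config.vocab_size, shape=[2, 16], dtype='int64')
encoder_output, pooled_output = model(input_ids)
print(encoder_output.shape, pooled_output.shape)  # expected: [2, 16, 768] and [2, 768]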
PyTorch/Detection/Efficientdet/effdet
effdet
factory
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .model import EfficientDet
from .bench import DetBenchTrain, DetBenchPredict
from .config import get_efficientdet_config
from utils.utils import load_checkpoint, freeze_layers_fn


def create_model(
        model_name, input_size=None, num_classes=None, bench_task='',
        pretrained=False, checkpoint_path='', checkpoint_ema=False, **kwargs):
    config = get_efficientdet_config(model_name)

    if num_classes is not None:
        config.num_classes = num_classes
    if input_size is not None:
        config.image_size = input_size

    pretrained_backbone_path = kwargs.pop('pretrained_backbone_path', '')
    if pretrained or checkpoint_path:
        pretrained_backbone_path = ''  # no point in loading backbone weights

    strict_load = kwargs.pop('strict_load', True)
    redundant_bias = kwargs.pop('redundant_bias', None)
    if redundant_bias is not None:
        # override config if set to something
        config.redundant_bias = redundant_bias

    soft_nms = kwargs.pop('soft_nms', False)
    config.label_smoothing = kwargs.pop('label_smoothing', 0.1)
    remove_params = kwargs.pop('remove_params', [])
    freeze_layers = kwargs.pop('freeze_layers', [])
    config.fused_focal_loss = kwargs.pop('fused_focal_loss', False)

    model = EfficientDet(config, pretrained_backbone_path=pretrained_backbone_path, **kwargs)

    # FIXME handle different head classes / anchors and re-init of necessary layers w/ pretrained load
    if checkpoint_path:
        load_checkpoint(model, checkpoint_path, use_ema=checkpoint_ema,
                        strict=strict_load, remove_params=remove_params)

    if len(freeze_layers) > 0:
        freeze_layers_fn(model, freeze_layers=freeze_layers)

    # wrap model in task specific bench if set
    if bench_task == 'train':
        model = DetBenchTrain(model, config)
    elif bench_task == 'predict':
        model = DetBenchPredict(model, config, soft_nms)
    return model
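A hedged usage sketch of the factory above; the model name and keyword values are illustrative rather than taken from this repo's documentation, so check get_efficientdet_config for the keys it actually registers.

# Usage sketch; 'efficientdet_d0' is an assumed config key.
model = create_model(
    'efficientdet_d0',
    num_classes=90,
    bench_task='predict',
)
model.eval()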
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ProjectionPlugin
taco2ProjectionPlugin
taco2ProjectionKernel
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "taco2ProjectionKernel.h" #include "cuda_runtime.h" #include <stdexcept> #include <string> using namespace tts; namespace nvinfer1 { namespace plugin { /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const int PROJECTION_COL_SIZE = 512; constexpr const int WARP_SIZE = 32; } // namespace /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ template <typename T, int NUM_THREADS> __device__ inline T warpSum(T const initVal) { constexpr const uint32_t mask = 0xffffffff >> (WARP_SIZE - NUM_THREADS); T val = initVal; #pragma unroll for (int d = NUM_THREADS / 2; d > 0; d /= 2) { val += __shfl_down_sync(mask, val, d, NUM_THREADS); } return val; } template <typename T, int BLOCK_SIZE> __device__ T cooperativeSum(T const initVal, T* const buffer) { // first all warps reduce to single value assert(BLOCK_SIZE % WARP_SIZE == 0); assert(BLOCK_SIZE <= WARP_SIZE * WARP_SIZE); T val = warpSum<T, WARP_SIZE>(initVal); if (threadIdx.x % WARP_SIZE == 0) { buffer[threadIdx.x / WARP_SIZE] = val; } __syncthreads(); if (threadIdx.x < (BLOCK_SIZE / WARP_SIZE)) { val = warpSum<T, BLOCK_SIZE / WARP_SIZE>(buffer[threadIdx.x]); } return val; } __device__ inline void sumReduce(float* const array, const int len) { for (int d = 1; d < blockDim.x; d *= 2) { if (threadIdx.x % (d * 2) == 0 && threadIdx.x + d < len) { array[threadIdx.x] += array[threadIdx.x + d]; } __syncthreads(); } } template <int INPUT_1_LENGTH, int INPUT_2_LENGTH> __global__ void projectionKernel(const float* const weights, const float* const bias, const float* const input1, const float* const input2, float* const output) { __shared__ float 
shared[PROJECTION_COL_SIZE]; // perform mat vec float v = 0.0f; constexpr const int inputLength = INPUT_1_LENGTH + INPUT_2_LENGTH; for (int col = threadIdx.x; col < INPUT_1_LENGTH; col += PROJECTION_COL_SIZE) { // load chunk if (col < INPUT_1_LENGTH) { v += input1[col] * weights[blockIdx.x * inputLength + col]; } } for (int col = threadIdx.x; col < INPUT_2_LENGTH; col += PROJECTION_COL_SIZE) { // load chunk if (col < INPUT_2_LENGTH) { v += input2[col] * weights[blockIdx.x * inputLength + (col + INPUT_1_LENGTH)]; } } v = cooperativeSum<float, PROJECTION_COL_SIZE>(v, shared); // add bias and write if (threadIdx.x == 0) { output[blockIdx.x] = bias[blockIdx.x] + v; } } /****************************************************************************** * CONSTRUCTORS / DESTRUCTOR ************************************************** *****************************************************************************/ Taco2ProjectionKernel::Taco2ProjectionKernel(const std::vector<float>& fcWeightsHost, const std::vector<float>& fcBiasHost, const int input1Length, const int input2Length, const int numDimension) : mInput1Length(input1Length) , mInput2Length(input2Length) , mInputLength(input1Length + input2Length) , mNumDimension(numDimension) , mWeightsDevice() , mBiasDevice() { const size_t numExpectedWeights = mInputLength * mNumDimension; const size_t numExpectedBias = mNumDimension; if (numExpectedWeights != fcWeightsHost.size()) { throw std::runtime_error("Expected " + std::to_string(numExpectedWeights) + " weights for FC but got " + std::to_string(fcWeightsHost.size()) + " instead."); } if (numExpectedBias != fcBiasHost.size()) { throw std::runtime_error("Expected " + std::to_string(numExpectedBias) + " biases for FC but got " + std::to_string(fcBiasHost.size()) + " instead."); } // copy up weights to GPU in row major and concatenated mWeightsDevice = CudaMemory<float>(fcWeightsHost); mBiasDevice = CudaMemory<float>(fcBiasHost); } /****************************************************************************** * PUBLIC METHODS ************************************************************* *****************************************************************************/ void Taco2ProjectionKernel::execute( const float* input1Device, const float* input2Device, float* outputDevice, cudaStream_t stream) { const dim3 grid(mNumDimension); const dim3 block(PROJECTION_COL_SIZE); if (mInput1Length != 1024) { throw std::runtime_error( "Plugin is configured to only handle hidden " "input length of 1024, but got " + std::to_string(mInput1Length)); } if (mInput2Length != 512) { throw std::runtime_error( "Plugin is configured to only handle context " "input length of 512, but got " + std::to_string(mInput1Length)); } projectionKernel<1024, 512><<<grid, block, 0, stream>>>( mWeightsDevice.data(), mBiasDevice.data(), input1Device, input2Device, outputDevice); } } // namespace plugin } // namespace nvinfer1
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit
deployment_toolkit
utils
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
from typing import Tuple

LOGGER = logging.getLogger(__name__)


def parse_server_url(server_url: str) -> Tuple[str, str, int]:
    DEFAULT_PORTS = {"http": 8000, "grpc": 8001}

    # extract protocol
    server_url_items = server_url.split("://")
    if len(server_url_items) != 2:
        raise ValueError("Prefix server_url with protocol ex.: grpc://127.0.0.1:8001")
    requested_protocol, server_url = server_url_items
    requested_protocol = requested_protocol.lower()

    if requested_protocol not in DEFAULT_PORTS:
        raise ValueError(f"Unsupported protocol: {requested_protocol}")

    # extract host and port
    default_port = DEFAULT_PORTS[requested_protocol]
    server_url_items = server_url.split(":")
    if len(server_url_items) == 1:
        host, port = server_url, default_port
    elif len(server_url_items) == 2:
        host, port = server_url_items
        port = int(port)
        if port != default_port:
            LOGGER.warning(
                f"Current server URL is {server_url} while default {requested_protocol} port is {default_port}"
            )
    else:
        raise ValueError(f"Could not parse {server_url}. Example of correct server URL: grpc://127.0.0.1:8001")
    return requested_protocol, host, port
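A couple of illustrative calls showing the parsing behaviour of the helper above; the URLs are examples only.

# Illustrative calls; the URLs are examples.
protocol, host, port = parse_server_url("grpc://127.0.0.1:8001")
assert (protocol, host, port) == ("grpc", "127.0.0.1", 8001)

# The port falls back to the protocol default when it is omitted.
assert parse_server_url("http://localhost") == ("http", "localhost", 8000)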
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner
runner
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TensorFlow2/Recommendation/DLRM_and_DCNv2/preproc
preproc
run_spark_cpu
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ######################################################################### # File Name: run_spark_cpu.sh #!/bin/bash set -e # the environment variables to run spark job # should modify below environment variables # the data path including 1TB criteo data, day_0, day_1, ... export INPUT_PATH=${1:-'/data/dlrm/criteo'} # the output path, use for generating the dictionary and the final dataset # the output folder should have more than 300GB export OUTPUT_PATH=${2:-'/data/dlrm/spark/output'} export FREQUENCY_LIMIT=${3:-'15'} # spark local dir should have about 3TB # the temporary path used for spark shuffle write export SPARK_LOCAL_DIRS='/data/dlrm/spark/tmp' # below numbers should be adjusted according to the resource of your running environment # set the total number of CPU cores, spark can use export TOTAL_CORES=80 # set the number of executors export NUM_EXECUTORS=8 # the cores for each executor, it'll be calculated export NUM_EXECUTOR_CORES=$((${TOTAL_CORES}/${NUM_EXECUTORS})) # unit: GB, set the max memory you want to use export TOTAL_MEMORY=800 # unit: GB, set the memory for driver export DRIVER_MEMORY=32 # the memory per executor export EXECUTOR_MEMORY=$(((${TOTAL_MEMORY}-${DRIVER_MEMORY})/${NUM_EXECUTORS})) OPTS="--frequency_limit $FREQUENCY_LIMIT" export SPARK_HOME=/opt/spark export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 export PATH=$SPARK_HOME/bin:$SPARK_HOME/sbin:$PATH # we use spark standalone to run the job export MASTER=spark://$HOSTNAME:7077 echo "Starting spark standalone" start-master.sh start-slave.sh $MASTER echo "Generating the dictionary..." spark-submit --master $MASTER \ --driver-memory "${DRIVER_MEMORY}G" \ --executor-cores $NUM_EXECUTOR_CORES \ --executor-memory "${EXECUTOR_MEMORY}G" \ --conf spark.cores.max=$TOTAL_CORES \ --conf spark.task.cpus=1 \ --conf spark.sql.files.maxPartitionBytes=1073741824 \ --conf spark.sql.shuffle.partitions=600 \ --conf spark.driver.maxResultSize=2G \ --conf spark.locality.wait=0s \ --conf spark.network.timeout=1800s \ spark_data_utils.py --mode generate_models \ $OPTS \ --input_folder $INPUT_PATH \ --days 0-23 \ --model_folder $OUTPUT_PATH/models \ --write_mode overwrite --low_mem 2>&1 | tee submit_dict_log.txt echo "Transforming the train data from day_0 to day_22..." 
spark-submit --master $MASTER \ --driver-memory "${DRIVER_MEMORY}G" \ --executor-cores $NUM_EXECUTOR_CORES \ --executor-memory "${EXECUTOR_MEMORY}G" \ --conf spark.cores.max=$TOTAL_CORES \ --conf spark.task.cpus=1 \ --conf spark.sql.files.maxPartitionBytes=1073741824 \ --conf spark.sql.shuffle.partitions=600 \ --conf spark.driver.maxResultSize=2G \ --conf spark.locality.wait=0s \ --conf spark.network.timeout=1800s \ spark_data_utils.py --mode transform \ --input_folder $INPUT_PATH \ --days 0-22 \ --output_folder $OUTPUT_PATH/train \ --model_size_file $OUTPUT_PATH/model_size.json \ --model_folder $OUTPUT_PATH/models \ --write_mode overwrite --low_mem 2>&1 | tee submit_train_log.txt echo "Splitting the last day into 2 parts of test and validation..." last_day=$INPUT_PATH/day_23 temp_test=$OUTPUT_PATH/temp/test temp_validation=$OUTPUT_PATH/temp/validation mkdir -p $temp_test $temp_validation lines=`wc -l $last_day | awk '{print $1}'` former=$((lines / 2)) latter=$((lines - former)) head -n $former $last_day > $temp_test/day_23 tail -n $latter $last_day > $temp_validation/day_23 echo "Transforming the test data in day_23..." spark-submit --master $MASTER \ --driver-memory "${DRIVER_MEMORY}G" \ --executor-cores $NUM_EXECUTOR_CORES \ --executor-memory "${EXECUTOR_MEMORY}G" \ --conf spark.cores.max=$TOTAL_CORES \ --conf spark.task.cpus=1 \ --conf spark.sql.files.maxPartitionBytes=1073741824 \ --conf spark.sql.shuffle.partitions=30 \ --conf spark.driver.maxResultSize=2G \ --conf spark.locality.wait=0s \ --conf spark.network.timeout=1800s \ spark_data_utils.py --mode transform \ --input_folder $temp_test \ --days 23-23 \ --output_folder $OUTPUT_PATH/test \ --output_ordering input \ --model_folder $OUTPUT_PATH/models \ --write_mode overwrite --low_mem 2>&1 | tee submit_test_log.txt echo "Transforming the validation data in day_23..." spark-submit --master $MASTER \ --driver-memory "${DRIVER_MEMORY}G" \ --executor-cores $NUM_EXECUTOR_CORES \ --executor-memory "${EXECUTOR_MEMORY}G" \ --conf spark.cores.max=$TOTAL_CORES \ --conf spark.task.cpus=1 \ --conf spark.sql.files.maxPartitionBytes=1073741824 \ --conf spark.sql.shuffle.partitions=30 \ --conf spark.driver.maxResultSize=2G \ --conf spark.locality.wait=0s \ --conf spark.network.timeout=1800s \ spark_data_utils.py --mode transform \ --input_folder $temp_validation \ --days 23-23 \ --output_folder $OUTPUT_PATH/validation \ --output_ordering input \ --model_folder $OUTPUT_PATH/models \ --write_mode overwrite --low_mem 2>&1 | tee submit_validation_log.txt rm -r $temp_test $temp_validation
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util
util
waveFileWriter
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_WAVEFILEWRITER_H #define TT2I_WAVEFILEWRITER_H #include <cstddef> #include <string> namespace tts { class WaveFileWriter { public: /** * @brief Write a mono sample data to a WAV file. * * @param filename The file name. * @param frequency The sample frequency. * @param data The raw data. * @param numSamples The number of samples. */ static void write(const std::string& filename, int frequency, const float* data, size_t numSamples); }; } // namespace tts #endif
PyTorch/Classification/GPUNet/triton/runner/maintainer
maintainer
maintainer_factory
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib

if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name

from .docker.maintainer import DockerMaintainer


class MaintainerFactory:
    @staticmethod
    def create_docker_maintainer():
        return DockerMaintainer()
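A factory like this is consumed by the runner pipeline; a minimal usage sketch is below. The import path is assumed from the folder layout (repository root on `PYTHONPATH`), and nothing beyond the class name of the returned maintainer is shown in the module itself:

```python
# Import path assumed from the repository layout (triton/runner/maintainer/...).
from triton.runner.maintainer.maintainer_factory import MaintainerFactory

# The factory hides the concrete maintainer class behind a single call site,
# so a future non-Docker maintainer only requires one more factory method.
maintainer = MaintainerFactory.create_docker_maintainer()
print(type(maintainer).__name__)  # -> "DockerMaintainer"
```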
TensorFlow/Detection/SSD/models/research/object_detection/data
data
kitti_label_map
item {
  id: 1
  name: 'car'
}

item {
  id: 2
  name: 'pedestrian'
}
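For reference, a small sketch of turning such a `.pbtxt` label map into an id → name lookup with a hand-rolled parse (this is not the `object_detection` label-map utility API, and the on-disk file name is an assumption based on this data folder):

```python
import re


def parse_label_map(pbtxt: str) -> dict:
    """Parse `item { id: N name: '...' }` blocks into an {id: name} lookup."""
    pattern = re.compile(r"item\s*\{\s*id:\s*(\d+)\s*name:\s*'([^']+)'\s*\}")
    return {int(item_id): name for item_id, name in pattern.findall(pbtxt)}


with open("kitti_label_map.pbtxt") as f:   # assumed file name in the data folder
    print(parse_label_map(f.read()))       # -> {1: 'car', 2: 'pedestrian'}
```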
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/bermuda
bermuda
onnx
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from pathlib import Path from typing import Dict, Optional, Union import numpy as np # pytype: disable=import-error import onnx import onnx.shape_inference import onnxruntime from google.protobuf import text_format from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec from ..extensions import loaders, runners, savers from .utils import infer_precision # pytype: enable=import-error LOGGER = logging.getLogger(__name__) def _value_info2tensor_spec(value_info: onnx.ValueInfoProto): onnx_data_type_map = {"float": "float32", "double": "float64"} elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower() dtype = onnx_data_type_map.get(elem_type_name, elem_type_name) def _get_dim(dim): which = dim.WhichOneof("value") if which is not None: # which is None when dim is None dim = getattr(dim, which) return None if isinstance(dim, (str, bytes)) else dim shape = value_info.type.tensor_type.shape shape = tuple(_get_dim(d) for d in shape.dim) return TensorSpec(value_info.name, dtype=dtype, shape=shape) def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]: import networkx as nx # build directed graph nx_graph = nx.DiGraph() def _get_dtype(vi): t = vi.type if hasattr(t, "tensor_type"): type_id = t.tensor_type.elem_type else: raise NotImplementedError("Not implemented yet") return TENSOR_TYPE_TO_NP_TYPE[type_id] node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info} node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output} node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input} for node in onnx_graph.node: node_dtype = node_output2type.get("+".join(node.output), None) nx_graph.add_node( node.name, op=node.op_type, attr={a.name: a for a in node.attribute}, dtype=node_dtype, ) for input_name in node.input: prev_node = node_outputs2node.get(input_name, None) if prev_node: nx_graph.add_edge(prev_node.name, node.name) for input_node in onnx_graph.input: input_name = input_node.name nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node)) next_node = node_inputs2node.get(input_name, None) if next_node: nx_graph.add_edge(input_name, next_node.name) for output in onnx_graph.output: output_name = output.name nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output)) prev_node = node_outputs2node.get(output_name, None) if prev_node: nx_graph.add_edge(prev_node.name, output_name) else: LOGGER.warning(f"Could not find previous node for {output_name}") input_names = [n.name for n in onnx_graph.input] output_names = [n.name for n in onnx_graph.output] most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None)) if most_common_dtype is not None: precision = 
{np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype] else: precision = None return precision class OnnxLoader(BaseLoader): def load(self, model_path: Union[str, Path], **_) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() model = onnx.load(model_path) onnx.checker.check_model(model) onnx.helper.strip_doc_string(model) model = onnx.shape_inference.infer_shapes(model) # TODO: probably modification of onnx model ios causes error on optimize # from onnx.utils import polish_model # model = polish_model(model) # run checker, docs strip, optimizer and shape inference inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input} outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output} precision = _infer_graph_precision(model.graph) return Model(model, precision, inputs, outputs) class OnnxSaver(BaseSaver): def __init__(self, as_text: bool = False): self._as_text = as_text def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None: model_path = Path(model_path) LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}") model_path.parent.mkdir(parents=True, exist_ok=True) onnx_model: onnx.ModelProto = model.handle if self._as_text: with model_path.open("w") as f: f.write(text_format.MessageToString(onnx_model)) else: with model_path.open("wb") as f: f.write(onnx_model.SerializeToString()) """ ExecutionProviders on onnxruntime 1.4.0 ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'MIGraphXExecutionProvider', 'NGRAPHExecutionProvider', 'OpenVINOExecutionProvider', 'DnnlExecutionProvider', 'NupharExecutionProvider', 'VitisAIExecutionProvider', 'ArmNNExecutionProvider', 'ACLExecutionProvider', 'CPUExecutionProvider'] """ def _check_providers(providers): providers = providers or [] if not isinstance(providers, (list, tuple)): providers = [providers] available_providers = onnxruntime.get_available_providers() unavailable = set(providers) - set(available_providers) if unavailable: raise RuntimeError(f"Unavailable providers {unavailable}") return providers class OnnxRunner(BaseRunner): def __init__(self, verbose_runtime_logs: bool = False): self._providers = None self._verbose_runtime_logs = verbose_runtime_logs def init_inference(self, model: Model): assert isinstance(model.handle, onnx.ModelProto) return OnnxRunnerSession( model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs ) class OnnxRunnerSession(BaseRunnerSession): def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False): super().__init__(model) self._input_names = None self._output_names = None self._session = None self._providers = providers self._verbose_runtime_logs = verbose_runtime_logs self._old_env_values = {} def __enter__(self): self._old_env_values = self._set_env_variables() sess_options = onnxruntime.SessionOptions() # default session options if self._verbose_runtime_logs: sess_options.log_severity_level = 0 sess_options.log_verbosity_level = 1 LOGGER.info( f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}" ) self._input_names = list(self._model.inputs) self._output_names = list(self._model.outputs) model_payload = self._model.handle.SerializeToString() self._session = onnxruntime.InferenceSession( model_payload, providers=self._providers, sess_options=sess_options ) return self def __exit__(self, exc_type, exc_value, traceback): self._input_names = None self._output_names = None self._session = 
None self._recover_env_variables(self._old_env_values) def __call__(self, x: Dict[str, object]): feed_dict = {k: x[k] for k in self._input_names} y_pred = self._session.run(self._output_names, feed_dict) y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.ONNX.value, OnnxLoader) runners.register_extension(Format.ONNX.value, OnnxRunner) savers.register_extension(Format.ONNX.value, OnnxSaver)
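A rough usage sketch of the classes defined above, bypassing the `loaders`/`runners` extension registries that the real pipeline goes through; the ONNX path is a placeholder and the snippet assumes the names are in scope (e.g. it is run from within this module):

```python
import numpy as np

# OnnxLoader / OnnxRunner come from the module above; "model.onnx" is a placeholder path.
loader = OnnxLoader()
model = loader.load("model.onnx")
print(model.precision, list(model.inputs), list(model.outputs))

runner = OnnxRunner(verbose_runtime_logs=False)
with runner.init_inference(model) as session:   # OnnxRunnerSession is a context manager
    # Build one dummy tensor per declared input, substituting 1 for dynamic dimensions.
    feed = {
        name: np.zeros([dim or 1 for dim in spec.shape], dtype=spec.dtype)
        for name, spec in model.inputs.items()
    }
    outputs = session(feed)                     # dict: output name -> numpy array
```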
PyTorch/Segmentation/MaskRCNN/pytorch/configs/ci
ci
e2e_mask_rcnn_R_50_FPN_1x_1gpu
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "/data3/joc_checkpoints/pytorch/maskrcnn/R-50.pkl" BACKBONE: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 RPN: USE_FPN: True ANCHOR_STRIDE: (4, 8, 16, 32, 64) PRE_NMS_TOP_N_TRAIN: 2000 PRE_NMS_TOP_N_TEST: 1000 POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TRAIN: 4000 ROI_HEADS: USE_FPN: True ROI_BOX_HEAD: POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" PREDICTOR: "FPNPredictor" ROI_MASK_HEAD: POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" PREDICTOR: "MaskRCNNC4Predictor" POOLER_RESOLUTION: 14 POOLER_SAMPLING_RATIO: 2 RESOLUTION: 28 SHARE_BOX_FEATURE_EXTRACTOR: False MASK_ON: True DATASETS: TRAIN: ("coco_2014_train", "coco_2014_valminusminival") TEST: ("coco_2014_minival",) DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: BASE_LR: 0.005 WEIGHT_DECAY: 0.0001 STEPS: (33000, 44000) MAX_ITER: 1000 IMS_PER_BATCH: 4 WARMUP_ITERS: 625 WARMUP_FACTOR: 0.000064 WARMUP_METHOD: "mlperf_linear" TEST: IMS_PER_BATCH: 1 PATHS_CATALOG: "maskrcnn_benchmark/config/paths_catalog_ci.py" OUTPUT_DIR: "."
TensorFlow2/Recommendation/SIM/sim/layers
layers
embedding
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf class EmbeddingInitializer(tf.keras.initializers.Initializer): def __call__(self, shape, dtype=tf.float32): maxval = tf.sqrt(tf.constant(1.) / tf.cast(shape[0], tf.float32)) maxval = tf.cast(maxval, dtype=dtype) minval = -maxval weights = tf.random.uniform(shape, minval=minval, maxval=maxval, dtype=dtype) weights = tf.cast(weights, dtype=tf.float32) return weights def get_config(self): return {} # https://github.com/NVIDIA/DeepLearningExamples/blob/81ee705868a11d6fe18c12d237abe4a08aab5fd6/TensorFlow2/Recommendation/DLRM/embedding.py#L94 class Embedding(tf.keras.layers.Layer): def __init__( self, input_dim, output_dim, *, trainable=True, embedding_name=None, initializer=EmbeddingInitializer() ): super(Embedding, self).__init__() self.input_dim = input_dim self.output_dim = output_dim self.embedding_name = ( embedding_name if embedding_name is not None else "embedding_table" ) self.embedding_table = None self.trainable = trainable self.initializer = initializer def build(self, input_shape): self.embedding_table = self.add_weight( self.embedding_name, shape=[self.input_dim, self.output_dim], dtype=tf.float32, initializer=self.initializer, trainable=self.trainable, ) @tf.function def call(self, indices): return tf.gather(params=self.embedding_table, indices=indices)
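A short usage sketch of the layer defined above; the table size, output dimension, and embedding name are arbitrary:

```python
import tensorflow as tf

# Embedding / EmbeddingInitializer come from the module above.
table = Embedding(input_dim=1000, output_dim=16, embedding_name="item_embedding")

indices = tf.constant([[3, 17, 42]])   # a batch with one sequence of three ids
vectors = table(indices)               # tf.gather over the table -> shape (1, 3, 16)

# EmbeddingInitializer draws uniformly from +/- sqrt(1 / input_dim) (~0.0316 here).
assert vectors.shape == (1, 3, 16)
```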
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util
util
componentTiming
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_COMPONENTTIMINGS_H #define TT2I_COMPONENTTIMINGS_H #include <ostream> #include <string> #include <vector> namespace tts { class ComponentTiming { public: /** * @brief Create a new component timing object. * * @param name The name of the component. * @param duration The time duration in seconds. */ ComponentTiming(const std::string& name, double duration); /** * @brief Add a timed sub-component. * * @param timing The timing of the sub-component. */ void addSubTiming(const ComponentTiming& timing); /** * @brief Add a timed sub-component. * * @param name The name of the sub-component. * @param duration The time duration in seconds. */ void addSubTiming(const std::string& name, double duration); /** * @brief Print out the time taken averaged over a given number of runs. * * @param stream The stream to print to. * @param numRuns The number of runs to average over. */ void print(std::ostream& stream, int numRuns) const; /** * @brief Get the name of this component. * * @return The name. */ std::string getName() const; /** * @brief Get the duration of this component in seconds. * * @return The duration. */ double getDuration() const; /** * @brief Get the timing of a sub-component. * * @param name The name of the sub-component. * * @return The timing of the sub-component. */ ComponentTiming getSubTiming(const std::string& name) const; private: std::string mName; double mDuration; std::vector<ComponentTiming> mSubTimings; /** * @brief Output the timing to stream. This will write a line like: * ``` * ComponentName: 7.32s (54.3%) * ``` * * @param level The level of indentation. * @param stream The stream to write to. * @param numRuns The number of runs to average. * @param parentTime The total time taken by the parent. */ void output(int level, std::ostream& stream, int numRuns, double parentTime = 0.0) const; }; } // namespace tts #endif
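To make the percentage-of-parent reporting documented for `output()` concrete (`ComponentName: 7.32s (54.3%)`), here is a loose sketch of the same hierarchical-timing idea in Python; the class and method names below are invented for illustration and this is not a translation of the C++ implementation:

```python
class Timing:
    """Named duration with optional sub-timings, printed as 'Name: Xs (Y%)'."""

    def __init__(self, name, duration):
        self.name, self.duration, self.subs = name, duration, []

    def add(self, name, duration):
        self.subs.append(Timing(name, duration))

    def show(self, num_runs=1, parent=0.0, level=0):
        pct = f" ({100.0 * self.duration / parent:.1f}%)" if parent else ""
        print("  " * level + f"{self.name}: {self.duration / num_runs:.2f}s{pct}")
        for sub in self.subs:
            sub.show(num_runs, self.duration, level + 1)


t = Timing("tacotron2+waveglow", 13.5)
t.add("tacotron2", 7.32)
t.add("waveglow", 6.18)
t.show(num_runs=1)
# tacotron2+waveglow: 13.50s
#   tacotron2: 7.32s (54.2%)
#   waveglow: 6.18s (45.8%)
```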
PyTorch/Detection/SSD/examples
examples
SSD300_FP16_INFERENCE_BENCHMARK
# This script launches SSD300 inference benchmark in FP16 on 1 GPU with 64 batch size
# Usage bash SSD300_FP16_INFERENCE_BENCHMARK.sh <path to this repository> <path to dataset> <additional flags>

python $1/main.py --backbone resnet50 --mode benchmark-inference --bs 64 --data $2 ${@:3}
Tools/PyTorch/TimeSeriesPredictionPlatform/triton
triton
run_performance_on_triton
#!/usr/bin/env python3 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import csv import logging import os import pathlib import shutil import sys from distutils.version import LooseVersion from enum import Enum from typing import Any, Dict, List import yaml # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .deployment_toolkit.core import BatchingMode, EvaluationMode, MeasurementMode, OfflineMode, PerformanceTool from .deployment_toolkit.model_analyzer import ModelAnalyzer, ModelAnalyzerConfig, ModelAnalyzerMode from .deployment_toolkit.perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig from .deployment_toolkit.report import save_results, show_results, sort_results from .deployment_toolkit.utils import parse_server_url from .deployment_toolkit.warmup import performance_evaluation_warmup LOGGER = logging.getLogger("run_performance_on_triton") if LooseVersion(sys.version) >= LooseVersion("3.8.0"): from importlib.metadata import version TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient")) TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer")) else: import pkg_resources TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version) TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version) def _log_dict(title: str, dict_: Dict[str, Any]): LOGGER.info(title) for key, value in dict_.items(): LOGGER.info(f"\t{key} = {value}") def _calculate_average_latency(r): avg_sum_fields = [ "Client Send", "Network+Server Send/Recv", "Server Queue", "Server Compute", "Server Compute Input", "Server Compute Infer", "Server Compute Output", "Client Recv", ] avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields]) return avg_latency def _update_performance_data(results: List, batch_size: int, performance_partial_file: str): row: Dict = {"Batch": batch_size} with open(performance_partial_file) as csvfile: reader = csv.DictReader(csvfile) for r in reader: avg_latency = _calculate_average_latency(r) row = {**row, **r, "avg latency": avg_latency} results.append(row) def _model_analyzer_evaluation( server_url: str, model_name: str, input_data: str, input_shapes: List[str], batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, concurrency_steps: int, batching_mode: BatchingMode, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, model_repository: str, result_path: pathlib.Path, output_shared_memory_size: int = 102400, verbose: bool = False, ): _log_dict( "Selected configuration", { "server_url": server_url, "model_name": model_name, "input_data": input_data, "input_shapes": input_shapes, "batch_sizes": batch_sizes, "number_of_triton_instances": number_of_triton_instances, "number_of_model_instances": 
number_of_model_instances, "measurement_mode": measurement_mode, "measurement_interval": measurement_interval, "measurement_request_count": measurement_request_count, "concurrency_steps": concurrency_steps, "batching_mode": batching_mode, "evaluation_mode": evaluation_mode, "offline_mode": offline_mode, "output_shared_memory_size": output_shared_memory_size, "model_repository": model_repository, "result_path": result_path, "verbose": verbose, }, ) perf_analyzer_config = { "measurement-interval": measurement_interval, } if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"): perf_analyzer_config["input-data"] = [input_data] else: perf_analyzer_config["input-data"] = input_data if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"): perf_analyzer_config["measurement-mode"] = measurement_mode.value perf_analyzer_config["measurement-request-count"] = measurement_request_count if evaluation_mode == EvaluationMode.OFFLINE: perf_analyzer_config["shared-memory"] = offline_mode.value perf_analyzer_config["output-shared-memory-size"] = output_shared_memory_size if input_shapes: if TRITON_MODEL_ANALYZER_VERSION > LooseVersion("1.8.0"): perf_analyzer_config["shape"] = input_shapes else: perf_analyzer_config["shape"] = input_shapes[0] LOGGER.warning("Model Analyzer <= 1.8.0 support only single shape param for Perf Analyzer.") if batching_mode == BatchingMode.STATIC: batch_sizes = batch_sizes concurrency = [number_of_triton_instances] elif batching_mode == BatchingMode.DYNAMIC: max_batch_size = max(batch_sizes) max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances max_concurrency = min(256, max_total_requests) step = max(1, max_concurrency // concurrency_steps) min_concurrency = step concurrency = {"start": min_concurrency, "stop": max_concurrency, "step": step} batch_sizes = [max(1, max_total_requests // 256)] else: raise ValueError(f"Unsupported batching mode: {batching_mode}") protocol, host, port = parse_server_url(server_url) checkpoints = pathlib.Path("./checkpoints") if checkpoints.is_dir(): shutil.rmtree(checkpoints.as_posix()) checkpoints.mkdir(parents=True, exist_ok=True) config = { "model_repository": model_repository, "triton_launch_mode": "remote", "run_config_search_disable": True, "perf_analyzer_flags": perf_analyzer_config, "perf_analyzer_timeout": 3600, # Workaround for Perf Analyzer timeout - use 1h "profile_models": [model_name], "batch_sizes": batch_sizes, "concurrency": concurrency, "verbose": verbose, "checkpoint_directory": checkpoints.as_posix(), "override_output_model_repository": True, "client_protocol": protocol, f"triton_{protocol}_endpoint": f"{host}:{port}", } if verbose: _log_dict("Model Analyzer profiling configuration", config) with open("config.yaml", "w") as file: yaml.safe_dump(config, file) config = ModelAnalyzerConfig() model_analyzer = ModelAnalyzer(config=config) model_analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=verbose) result_path.mkdir(parents=True, exist_ok=True) for file in checkpoints.iterdir(): if not file.is_file() or file.suffix != ".ckpt": continue LOGGER.info(f"Moving checkpoint {file.name} to {result_path}") shutil.move(file, result_path / file.name) inference_output_fields = [ "batch_size", "concurrency", "perf_throughput", "perf_latency", "perf_client_send_recv", "perf_client_response_wait", "perf_server_queue", "perf_server_compute_input", "perf_server_compute_infer", "perf_server_compute_output", ] gpu_output_fields = [ "gpu_uuid", "batch_size", "concurrency", "gpu_used_memory", "gpu_free_memory", 
"gpu_utilization", "gpu_power_usage", ] filename_model_inference = "metrics-model-inference.csv" filename_model_gpu = "metrics-model-gpu.csv" config = { "analysis_models": model_name, "checkpoint_directory": result_path.as_posix(), "export_path": "/tmp", "inference_output_fields": inference_output_fields, "gpu_output_fields": gpu_output_fields, "filename_model_inference": filename_model_inference, "filename_model_gpu": filename_model_gpu, "summarize": False, } if verbose: _log_dict("Model Analyzer analysis configuration", config) with open("config.yaml", "w") as file: yaml.safe_dump(config, file) config = ModelAnalyzerConfig() model_analyzer = ModelAnalyzer(config=config) model_analyzer.run(mode=ModelAnalyzerMode.ANALYZE, verbose=verbose) inference_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_inference gpu_metrics_file = pathlib.Path("/tmp") / "results" / filename_model_gpu for file in [inference_metrics_file, gpu_metrics_file]: LOGGER.info(f"Moving metrics {file.name} to {result_path}") shutil.move(file, result_path / file.name) def _perf_analyzer_evaluation( server_url: str, model_name: str, input_data: str, input_shapes: List[str], batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, concurrency_steps: int, batching_mode: BatchingMode, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, result_path: pathlib.Path, output_shared_memory_size: int = 102400, verbose: bool = False, ): protocol, host, port = parse_server_url(server_url) if batching_mode == BatchingMode.STATIC: batch_sizes = batch_sizes max_concurrency = 1 min_concurrency = 1 step = 1 elif batching_mode == BatchingMode.DYNAMIC: max_batch_size = max(batch_sizes) max_total_requests = 2 * max_batch_size * number_of_triton_instances * number_of_model_instances max_concurrency = min(256, max_total_requests) step = max(1, max_concurrency // concurrency_steps) min_concurrency = step batch_sizes = [max(1, max_total_requests // 256)] else: raise ValueError(f"Unsupported batching mode: {batching_mode}") _log_dict( "Selected configuration", { "server_url": server_url, "model_name": model_name, "input_data": input_data, "input_shapes": input_shapes, "batch_sizes": batch_sizes, "number_of_triton_instances": number_of_triton_instances, "number_of_model_instances": number_of_model_instances, "measurement_mode": measurement_mode, "measurement_interval": measurement_interval, "measurement_request_count": measurement_request_count, "concurrency_steps": concurrency_steps, "batching_mode": batching_mode, "evaluation_mode": evaluation_mode, "offline_mode": offline_mode, "output_shared_memory_size": output_shared_memory_size, "result_path": result_path, "verbose": verbose, }, ) results: List[Dict] = list() for batch_size in batch_sizes: for concurrency in range(min_concurrency, max_concurrency + step, step): performance_partial_file = f"triton_performance_{evaluation_mode.value.lower()}_{batching_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv" params = { "model-name": model_name, "model-version": 1, "batch-size": batch_size, "url": f"{host}:{port}", "protocol": protocol, "input-data": input_data, "measurement-interval": measurement_interval, "concurrency-range": f"{concurrency}:{concurrency}:1", "latency-report-file": performance_partial_file, } if verbose: params["extra-verbose"] = True if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"): params["measurement-mode"] = 
measurement_mode.value params["measurement-request-count"] = measurement_request_count if evaluation_mode == EvaluationMode.OFFLINE: params["shared-memory"] = offline_mode.value params["output-shared-memory-size"] = output_shared_memory_size if verbose: _log_dict(f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}", params) config = PerfAnalyzerConfig() for param, value in params.items(): config[param] = value for shape in input_shapes: config["shape"] = shape perf_analyzer = PerfAnalyzer(config=config) perf_analyzer.run() _update_performance_data(results, batch_size, performance_partial_file) os.remove(performance_partial_file) results = sort_results(results=results) save_results(filename=result_path.as_posix(), data=results) show_results(results=results) def _run_performance_analysis( server_url: str, model_name: str, input_data: str, input_shapes: List[str], batch_sizes: List[int], number_of_triton_instances: int, number_of_model_instances: int, measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, concurrency_steps: int, batching_mode: BatchingMode, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, output_shared_memory_size: int, performance_tool: PerformanceTool, model_repository: str, result_path: pathlib.Path, warmup: bool, verbose: bool, ): log_level = logging.INFO if not verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) if performance_tool == PerformanceTool.MODEL_ANALYZER: if result_path.suffix: raise ValueError( "Results path for Model Analyzer is invalid. Please, provide the directory name. Example: results" ) elif performance_tool == PerformanceTool.PERF_ANALYZER: if result_path.suffix != ".csv": raise ValueError( "Results path for Perf Analyzer is invalid. Please, provide the CSV file name. 
Example: results.csv" ) else: raise ValueError(f"Unsupported performance tool {performance_tool}") if warmup: LOGGER.info("Running warmup before the main test") performance_evaluation_warmup( server_url=server_url, model_name=model_name, input_data=input_data, input_shapes=input_shapes, batch_sizes=batch_sizes, number_of_triton_instances=number_of_triton_instances, number_of_model_instances=number_of_model_instances, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, batching_mode=batching_mode, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, ) if performance_tool == PerformanceTool.MODEL_ANALYZER: LOGGER.info("Using Model Analyzer for performance evaluation") _model_analyzer_evaluation( server_url=server_url, model_name=model_name, input_data=input_data, input_shapes=input_shapes, batch_sizes=batch_sizes, number_of_triton_instances=number_of_triton_instances, number_of_model_instances=number_of_model_instances, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, concurrency_steps=concurrency_steps, batching_mode=batching_mode, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, model_repository=model_repository, result_path=result_path, verbose=verbose, ) elif performance_tool == PerformanceTool.PERF_ANALYZER: LOGGER.info("Using Perf Analyzer for performance evaluation") _perf_analyzer_evaluation( server_url=server_url, model_name=model_name, input_data=input_data, input_shapes=input_shapes, batch_sizes=batch_sizes, number_of_triton_instances=number_of_triton_instances, number_of_model_instances=number_of_model_instances, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, concurrency_steps=concurrency_steps, batching_mode=batching_mode, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, result_path=result_path, verbose=verbose, ) else: raise ValueError(f"Unsupported performance tool {performance_tool}") class MeasurementMode(Enum): """ Available measurement stabilization modes """ COUNT_WINDOWS = "count_windows" TIME_WINDOWS = "time_windows" def main(): parser = argparse.ArgumentParser() parser.add_argument( "--server-url", type=str, required=False, default="http://127.0.0.1:8000", help="Url to Triton server", ) parser.add_argument( "--model-name", type=str, required=True, help="Name of the model to test", ) parser.add_argument( "--input-data", type=str, required=False, default="random", help="Input data to perform profiling.", ) parser.add_argument( "--input-shapes", action="append", required=False, help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.", ) parser.add_argument( "--batch-sizes", type=str, required=True, help="List of batch sizes to tests. 
Comma separated.", ) parser.add_argument( "--number-of-triton-instances", type=int, default=1, help="Number of Triton Server instances", ) parser.add_argument( "--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server", ) parser.add_argument( "--measurement-mode", choices=[item.value for item in MeasurementMode], default=MeasurementMode.COUNT_WINDOWS.value, type=str, help="Select measurement mode " "'time_windows' stabilize performance on measurement window. " "'count_windows' stabilize performance on number of samples.", ) parser.add_argument( "--measurement-interval", required=False, help="Time window perf_analyzer will wait to stabilize the measurement", default=5000, type=int, ) parser.add_argument( "--measurement-request-count", required=False, help="Number of samples on which perf_analyzer will stabilize the measurement", default=50, type=int, ) parser.add_argument( "--concurrency-steps", help="Define number of concurrency steps used for dynamic batching tests", default=32, type=int, ) parser.add_argument( "--batching-mode", choices=[item.value for item in BatchingMode], default=BatchingMode.STATIC.value, type=str, help="Select batching mode " "'static' run static batching scenario. " "'dynamic' run dynamic batching scenario.", ) parser.add_argument( "--evaluation-mode", choices=[item.value for item in EvaluationMode], default=EvaluationMode.OFFLINE.value, type=str, help="Select evaluation mode " "'offline' run offline analysis and use GPU memory to pass tensors. " "'online' run online analysis and use HTTP protocol.", ) parser.add_argument( "--offline-mode", choices=[item.value for item in OfflineMode], default=OfflineMode.SYSTEM.value, type=str, help="Select offline mode " "'system' pass tensors through CPU RAM memory. " "'cuda' pass tensors through GPU RAM memory.", ) parser.add_argument( "--output-shared-memory-size", default=100240, type=int, help="Size of memory buffer allocated for output with dynamic shapes in bytes. " "Has to be equal to maximal size of output tensor.", ) parser.add_argument( "--performance-tool", choices=[item.value for item in PerformanceTool], default=PerformanceTool.MODEL_ANALYZER.value, type=str, help="Select performance tool for measurement mode " "'model_analyzer' use Model Analyzer " "'perf_analyzer' use Perf Analyzer", ) parser.add_argument( "--model-repository", default=None, type=str, help="Path to model repository. 
Valid when using Model Analyzer", ) parser.add_argument("--result-path", type=pathlib.Path, required=True, help="Path where results files is stored.") parser.add_argument( "--warmup", help="Enable model warmup before performance test", action="store_true", default=False ) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) args = parser.parse_args() batch_sizes = list(map(lambda x: int(x), args.batch_sizes.split(","))) _run_performance_analysis( server_url=args.server_url, model_name=args.model_name, input_data=args.input_data, input_shapes=args.input_shapes or [], batch_sizes=batch_sizes, number_of_triton_instances=args.number_of_triton_instances, number_of_model_instances=args.number_of_model_instances, measurement_mode=MeasurementMode(args.measurement_mode), measurement_interval=args.measurement_interval, measurement_request_count=args.measurement_request_count, concurrency_steps=args.concurrency_steps, batching_mode=BatchingMode(args.batching_mode), evaluation_mode=EvaluationMode(args.evaluation_mode), offline_mode=OfflineMode(args.offline_mode), output_shared_memory_size=args.output_shared_memory_size, performance_tool=PerformanceTool(args.performance_tool), model_repository=args.model_repository, result_path=args.result_path, warmup=args.warmup, verbose=args.verbose, ) if __name__ == "__main__": main()
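Two of the smaller moving parts above are easy to sanity-check in isolation; the values below are invented:

```python
# _calculate_average_latency() sums the per-component columns of a Perf Analyzer
# CSV row (values arrive as strings); columns missing from the row count as 0.
row = {"Server Queue": "120", "Server Compute Infer": "2300", "Client Recv": "15"}
assert _calculate_average_latency(row) == 120 + 2300 + 15

# Batch sizes reach the CLI as a comma-separated string, exactly as main() parses them.
batch_sizes = list(map(int, "1,2,4,8".split(",")))   # -> [1, 2, 4, 8]
```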
PyTorch/Segmentation/MaskRCNN/pytorch/configs
configs
e2e_mask_rcnn_R_50_FPN_1x
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" BACKBONE: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 RPN: USE_FPN: True ANCHOR_STRIDE: (4, 8, 16, 32, 64) PRE_NMS_TOP_N_TRAIN: 2000 PRE_NMS_TOP_N_TEST: 1000 POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TRAIN: 12000 ROI_HEADS: USE_FPN: True ROI_BOX_HEAD: POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" PREDICTOR: "FPNPredictor" ROI_MASK_HEAD: POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" PREDICTOR: "MaskRCNNC4Predictor" POOLER_RESOLUTION: 14 POOLER_SAMPLING_RATIO: 2 RESOLUTION: 28 SHARE_BOX_FEATURE_EXTRACTOR: False MASK_ON: True DATASETS: TRAIN: ("coco_2017_train",) TEST: ("coco_2017_val",) DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: BASE_LR: 0.12 WEIGHT_DECAY: 0.0001 STEPS: (12000, 16000) MAX_ITER: 16667 IMS_PER_BATCH: 96 WARMUP_FACTOR: 0.0001 WARMUP_ITERS: 800 TEST: IMS_PER_BATCH: 8
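A hedged sketch of how such a YAML file is typically consumed in this codebase — merged into the yacs-based `cfg` node before building the model. The import path is assumed from the repository layout, the override values are arbitrary, and the `DTYPE` key mirrors the override used by the training script elsewhere in this repo:

```python
# Import path assumed from the repository layout; cfg is a yacs CfgNode.
from maskrcnn_benchmark.config import cfg

cfg.merge_from_file("configs/e2e_mask_rcnn_R_50_FPN_1x.yaml")          # this file
cfg.merge_from_list(["SOLVER.IMS_PER_BATCH", 8, "DTYPE", "float16"])   # CLI-style overrides
cfg.freeze()

print(cfg.MODEL.MASK_ON, cfg.SOLVER.BASE_LR)   # -> True 0.12 (unless overridden above)
```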
Tools/DGLPyTorch/SyntheticGraphGeneration
SyntheticGraphGeneration
.gitignore
/save/
/data/
__pycache__
.ipynb_checkpoints/
*.csv
*.txt
dask-worker-space
exp
PyTorch/Segmentation/MaskRCNN/pytorch
pytorch
train
#!/bin/bash # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. #8 GPUS x 12 batch/GPU IMAGE=`docker build . --pull | tail -n 1 | awk '{print $3}'` GPU=8 NAME='MRCNN_TRAIN' CONFIG='configs/e2e_mask_rcnn_R_50_FPN_1x.yaml' PATH_TO_COCO='/home/sharath/Downloads/11419' #Location on COCO-2017 on local machine #PATH_TO_RN50 - SCRIPT assumes R-50.pth exists in PATH_TO_COCO/models/R-50.pth #Specify datasets of your choice with parameter DATASETS.TRAIN and DATASETS.TEST MOUNT_LOCATION='/datasets/coco' DOCKER_RESULTS='/results' LOGFILE='joblog.log' COMMAND="python -m torch.distributed.launch --nproc_per_node=$GPU tools/train_net.py \ --config-file $CONFIG \ DATASETS.TRAIN "(\"coco_2017_train\",)" \ DATASETS.TEST "(\"coco_2017_val\",)" \ SOLVER.BASE_LR 0.12 \ SOLVER.MAX_ITER 16667 \ SOLVER.STEPS \"(12000, 16000)\" \ SOLVER.IMS_PER_BATCH 96 \ DTYPE \"float16\" \ OUTPUT_DIR $DOCKER_RESULTS \ | tee $LOGFILE" echo $COMMAND docker run --runtime=nvidia -v $PATH_TO_COCO:/$MOUNT_LOCATION --rm --name=$NAME --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 --ipc=host -t -i $IMAGE bash -c "$COMMAND"
PyTorch/Classification/GPUNet/triton/225ms-D
225ms-D
README
# Deploying the GPUNet model on Triton Inference Server This folder contains instructions for deployment to run inference on Triton Inference Server as well as a detailed performance analysis. The purpose of this document is to help you with achieving the best inference performance. ## Table of contents - [Solution overview](#solution-overview) - [Introduction](#introduction) - [Deployment process](#deployment-process) - [Setup](#setup) - [Quick Start Guide](#quick-start-guide) - [Performance](#performance) - [Offline scenario](#offline-scenario) - [Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16) - [Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16) - [Online scenario](#online-scenario) - [Online: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#online-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16) - [Online: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#online-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16) - [Advanced](#advanced) - [Step by step deployment process](#step-by-step-deployment-process) - [Latency explanation](#latency-explanation) - [Release notes](#release-notes) - [Changelog](#changelog) - [Known issues](#known-issues) ## Solution overview ### Introduction The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server) provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or gRPC endpoint, allowing remote clients to request inferencing for any number of GPU or CPU models being managed by the server. This README provides step-by-step deployment instructions for models generated during training (as described in the [model README](../readme.md)). Additionally, this README provides the corresponding deployment scripts that ensure optimal GPU utilization during inferencing on Triton Inference Server. ### Deployment process The deployment process consists of two steps: 1. Conversion. The purpose of conversion is to find the best performing model format supported by Triton Inference Server. Triton Inference Server uses a number of runtime backends such as [TensorRT](https://developer.nvidia.com/tensorrt), [LibTorch](https://github.com/triton-inference-server/pytorch_backend) and [ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend) to support various model types. Refer to the [Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton) for a list of available backends. 2. Configuration. Model configuration on Triton Inference Server, which generates necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md). After deployment Triton inference server is used for evaluation of converted model in two steps: 1. Correctness tests. Produce results which are tested against given correctness thresholds. 2. Performance tests. Produce latency and throughput results for offline (static batching) and online (dynamic batching) scenarios. All steps are executed by provided runner script. 
Refer to [Quick Start Guide](#quick-start-guide) ## Setup Ensure you have the following components: * [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) * [NVIDIA PyTorch NGC container 21.12](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch) * [NVIDIA Triton Inference Server NGC container 21.12](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver) * [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html) * [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU ## Quick Start Guide Running the following scripts will build and launch the container with all required dependencies for native PyTorch as well as Triton Inference Server. This is necessary for running inference and can also be used for data download, processing, and training of the model. 1. Clone the repository. ``` git clone https://github.com/NVIDIA/DeepLearningExamples.git cd PyTorch/Classification/GPUNet ``` 2. Prepare dataset. See the [Quick Start Guide](../../README.md#prepare-the-dataset) 3. Build and run a container that extends NGC PyTorch with the Triton client libraries and necessary dependencies. ``` ./triton/scripts/docker/build.sh ./triton/scripts/docker/interactive.sh /path/to/imagenet/val/ ``` 4. Execute runner script (please mind, the run scripts are prepared per NVIDIA GPU). ``` NVIDIA DGX-1 (1x V100 32GB): ./triton/225ms-D/runner/start_NVIDIA-DGX-1-\(1x-V100-32GB\).sh NVIDIA DGX A100 (1x A100 80GB): ./triton/225ms-D/runner/start_NVIDIA-DGX-A100-\(1x-A100-80GB\).sh ``` ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Offline scenario The offline scenario assumes the client and server are located on the same host. 
The tests uses: - tensors are passed through shared memory between client and server, the Perf Analyzer flag `shared-memory=system` is used - single request is send from client to server with static size of batch #### Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX-1 (1x V100 32GB) | | Backend |ONNX Runtime | | Backend accelerator |NVIDIA TensorRT| | Precision |FP16 | | Model format |ONNX | | Max batch size |64 | | Number of model instances |2| | Export Format | ONNX | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_latency.png"></td> </tr> <tr> <td><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_offline_2/plots/latency_vs_batch.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 357.64 | 0.05 | 0.22 | 0.08 | 0.26 | 2.17 | 0.01 | 0.00 | 2.79 | 2.83 | 2.84 | 2.87 | 2.79 | | 2 | 1 | 452.00 | 0.05 | 0.22 | 0.08 | 0.43 | 3.62 | 0.01 | 0.00 | 4.41 | 4.44 | 4.45 | 4.52 | 4.41 | | 4 | 1 | 536.00 | 0.05 | 0.23 | 0.08 | 0.74 | 6.32 | 0.01 | 0.00 | 7.42 | 7.46 | 7.47 | 7.50 | 7.42 | | 8 | 1 | 592.00 | 0.05 | 0.22 | 0.08 | 1.36 | 11.64 | 0.01 | 0.00 | 13.35 | 13.41 | 13.42 | 13.45 | 13.35 | | 16 | 1 | 640.00 | 0.05 | 0.23 | 0.08 | 2.60 | 21.80 | 0.01 | 0.00 | 24.76 | 24.84 | 24.89 | 24.93 | 24.76 | | 32 | 1 | 640.00 | 0.05 | 0.26 | 0.06 | 5.25 | 42.06 | 0.02 | 0.00 | 47.69 | 47.88 | 47.93 | 48.11 | 47.70 | | 64 | 1 | 640.00 | 0.06 | 0.37 | 0.09 | 12.11 | 82.00 | 0.05 | 0.00 | 94.81 | 95.06 | 95.17 | 95.17 | 94.68 | </details> #### Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Backend |ONNX Runtime | | Backend accelerator |NVIDIA TensorRT| | Precision |FP16 | | Model format |ONNX | | Max batch size |64 | | Number of model instances |2| | Export Format | ONNX | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_batch.png"></td> <td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/throughput_vs_latency.png"></td> </tr> <tr> <td><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_offline_2/plots/latency_vs_batch.png"></td> 
</tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 592.00 | 0.02 | 0.07 | 0.02 | 0.17 | 1.40 | 0.00 | 0.00 | 1.68 | 1.70 | 1.72 | 1.76 | 1.68 | | 2 | 1 | 798.00 | 0.02 | 0.07 | 0.02 | 0.28 | 2.11 | 0.00 | 0.00 | 2.50 | 2.56 | 2.57 | 2.60 | 2.50 | | 4 | 1 | 964.00 | 0.02 | 0.07 | 0.02 | 0.48 | 3.55 | 0.00 | 0.00 | 4.13 | 4.21 | 4.23 | 4.31 | 4.14 | | 8 | 1 | 1008.00 | 0.02 | 0.11 | 0.03 | 1.17 | 6.54 | 0.01 | 0.00 | 7.87 | 7.96 | 7.97 | 8.03 | 7.88 | | 16 | 1 | 1024.00 | 0.03 | 0.11 | 0.03 | 2.86 | 12.38 | 0.02 | 0.00 | 15.42 | 15.47 | 15.49 | 15.50 | 15.42 | | 32 | 1 | 1056.00 | 0.03 | 0.13 | 0.03 | 5.48 | 23.76 | 0.02 | 0.00 | 29.44 | 29.50 | 29.52 | 29.55 | 29.44 | | 64 | 1 | 1088.00 | 0.03 | 0.13 | 0.03 | 9.76 | 46.28 | 0.03 | 0.00 | 56.87 | 57.09 | 57.14 | 57.26 | 56.25 | </details> ### Online scenario The online scenario assumes the client and server are located on different hosts. The tests uses: - tensors are passed through HTTP from client to server - concurrent requests are send from client to server, the final batch is created on server side #### Online: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX-1 (1x V100 32GB) | | Backend |ONNX Runtime | | Backend accelerator |NVIDIA TensorRT| | Precision |FP16 | | Model format |ONNX | | Max batch size |64 | | Number of model instances |2| | Export Format | ONNX | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_dgx-1_(1x_v100_32gb)_experiment_2_triton_performance_online_2/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 8 | 394.80 | 0.22 | 1.98 | 11.97 | 0.45 | 5.54 | 0.01 | 0.00 | 20.13 | 25.20 | 25.35 | 25.48 | 20.17 | | 1 | 16 | 483.50 | 0.23 | 4.07 | 17.86 | 1.45 | 9.25 | 0.02 | 0.00 | 35.15 | 37.66 | 38.16 | 39.77 | 32.88 | | 1 | 24 | 494.50 | 0.42 | 8.57 | 22.09 | 3.54 | 13.37 | 0.03 | 0.00 | 49.03 | 63.12 | 65.48 | 71.17 | 48.02 | | 1 | 32 | 511.00 | 0.46 | 10.33 | 27.72 | 4.68 | 16.87 | 0.03 | 0.00 
| 62.97 | 69.96 | 70.84 | 77.58 | 60.09 | | 1 | 40 | 512.00 | 0.49 | 16.81 | 30.52 | 6.80 | 20.82 | 0.05 | 0.00 | 80.14 | 98.04 | 104.66 | 113.47 | 75.49 | | 1 | 48 | 513.49 | 0.52 | 25.32 | 26.75 | 10.11 | 28.60 | 0.06 | 0.00 | 98.71 | 137.52 | 142.02 | 142.38 | 91.35 | | 1 | 56 | 511.00 | 0.64 | 28.44 | 32.14 | 12.92 | 30.57 | 0.07 | 0.00 | 125.78 | 127.50 | 128.17 | 131.06 | 104.78 | | 1 | 64 | 541.00 | 0.53 | 27.44 | 41.96 | 10.95 | 32.38 | 0.06 | 0.00 | 124.61 | 147.15 | 149.93 | 150.67 | 113.33 | | 1 | 72 | 546.00 | 0.58 | 27.71 | 46.25 | 13.81 | 38.06 | 0.07 | 0.00 | 125.61 | 180.75 | 187.81 | 189.66 | 126.49 | | 1 | 80 | 527.00 | 0.54 | 29.70 | 54.12 | 14.68 | 41.82 | 0.08 | 0.00 | 143.30 | 190.64 | 201.65 | 203.69 | 140.94 | | 1 | 88 | 508.00 | 0.83 | 25.69 | 61.55 | 17.04 | 50.94 | 0.08 | 0.00 | 149.03 | 176.09 | 217.93 | 218.31 | 156.14 | | 1 | 96 | 560.00 | 0.72 | 34.51 | 56.07 | 18.74 | 53.09 | 0.10 | 0.00 | 168.39 | 215.79 | 218.79 | 219.80 | 163.23 | | 1 | 104 | 528.00 | 0.67 | 44.94 | 57.91 | 23.12 | 51.57 | 0.11 | 0.00 | 220.06 | 229.40 | 242.34 | 243.38 | 178.33 | | 1 | 112 | 562.00 | 0.76 | 33.78 | 75.07 | 17.79 | 51.63 | 0.10 | 0.00 | 176.99 | 223.75 | 247.24 | 247.89 | 179.12 | | 1 | 120 | 545.00 | 0.64 | 39.43 | 76.38 | 22.92 | 57.66 | 0.12 | 0.00 | 194.96 | 283.54 | 293.95 | 295.63 | 197.16 | | 1 | 128 | 558.00 | 0.77 | 38.16 | 88.39 | 18.62 | 54.24 | 0.11 | 0.00 | 192.54 | 248.47 | 288.30 | 290.40 | 200.29 | | 1 | 136 | 538.00 | 0.89 | 50.60 | 77.52 | 25.45 | 68.08 | 0.17 | 0.00 | 220.09 | 284.54 | 294.65 | 294.90 | 222.71 | | 1 | 144 | 534.00 | 0.59 | 49.03 | 87.53 | 26.85 | 79.59 | 0.16 | 0.00 | 297.19 | 306.93 | 307.28 | 308.33 | 243.74 | | 1 | 152 | 588.00 | 0.79 | 26.11 | 119.83 | 20.49 | 68.73 | 0.12 | 0.00 | 234.27 | 304.38 | 311.12 | 312.06 | 236.08 | | 1 | 160 | 527.00 | 0.68 | 54.55 | 107.78 | 25.93 | 72.68 | 0.17 | 0.00 | 288.26 | 322.57 | 333.32 | 333.96 | 261.78 | | 1 | 168 | 535.00 | 0.86 | 47.44 | 107.55 | 26.95 | 79.84 | 0.15 | 0.00 | 263.82 | 326.42 | 375.91 | 376.99 | 262.79 | | 1 | 176 | 534.47 | 0.82 | 36.78 | 155.22 | 23.25 | 60.96 | 0.14 | 0.00 | 292.28 | 323.92 | 324.37 | 342.94 | 277.18 | | 1 | 184 | 534.00 | 0.91 | 31.66 | 143.39 | 25.71 | 78.13 | 0.14 | 0.00 | 268.65 | 323.83 | 331.44 | 333.50 | 279.94 | | 1 | 192 | 458.00 | 0.92 | 33.42 | 152.41 | 33.19 | 90.85 | 0.16 | 0.00 | 317.25 | 386.27 | 386.62 | 386.86 | 310.95 | | 1 | 200 | 500.00 | 1.04 | 48.76 | 150.77 | 32.09 | 92.64 | 0.16 | 0.00 | 317.27 | 430.30 | 450.96 | 453.12 | 325.46 | | 1 | 208 | 534.00 | 0.96 | 56.61 | 157.52 | 27.52 | 74.40 | 0.15 | 0.00 | 312.92 | 377.62 | 378.97 | 380.08 | 317.16 | | 1 | 216 | 521.00 | 1.06 | 45.53 | 169.89 | 29.81 | 81.20 | 0.15 | 0.00 | 321.49 | 396.82 | 401.63 | 402.69 | 327.64 | | 1 | 224 | 457.00 | 1.37 | 43.29 | 197.18 | 39.17 | 96.67 | 0.16 | 0.00 | 374.58 | 428.44 | 438.04 | 439.82 | 377.84 | | 1 | 232 | 472.00 | 1.09 | 61.50 | 172.57 | 36.75 | 98.89 | 0.16 | 0.00 | 391.89 | 432.47 | 437.79 | 444.23 | 370.95 | | 1 | 240 | 491.00 | 0.86 | 55.80 | 205.28 | 41.39 | 87.48 | 0.16 | 0.00 | 404.10 | 439.75 | 458.02 | 461.24 | 390.97 | | 1 | 248 | 486.51 | 0.70 | 81.07 | 187.71 | 39.81 | 94.90 | 0.16 | 0.00 | 417.31 | 438.25 | 466.65 | 467.05 | 404.36 | | 1 | 256 | 541.00 | 1.03 | 67.13 | 190.53 | 33.08 | 95.01 | 0.17 | 0.00 | 386.18 | 443.79 | 464.99 | 465.30 | 386.95 | </details> #### Online: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | 
|:-----------------------------|:-----------------------------| | GPU |NVIDIA DGX A100 (1x A100 80GB) | | Backend |ONNX Runtime | | Backend accelerator |NVIDIA TensorRT| | Precision |FP16 | | Model format |ONNX | | Max batch size |64 | | Number of model instances |2| | Export Format | ONNX | | Device Kind | gpu | | Torch Jit | none | <table> <tbody> <tr> <td colspan="2" align="center"><img src="./reports/nvidia_dgx_a100_(1x_a100_80gb)_experiment_2_triton_performance_online_2/plots/latency_vs_concurrency.png"></td> </tr> </tbody> </table> <details> <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 8 | 740.00 | 0.24 | 1.71 | 5.70 | 0.31 | 2.76 | 0.01 | 0.00 | 10.68 | 12.39 | 13.02 | 13.97 | 10.73 | | 1 | 16 | 820.00 | 0.33 | 3.65 | 9.07 | 1.00 | 5.25 | 0.02 | 0.00 | 19.58 | 25.14 | 26.46 | 29.94 | 19.32 | | 1 | 24 | 853.00 | 0.30 | 6.96 | 10.28 | 2.06 | 7.87 | 0.03 | 0.00 | 27.40 | 40.71 | 44.75 | 47.74 | 27.50 | | 1 | 32 | 880.00 | 0.44 | 10.85 | 11.54 | 2.83 | 9.73 | 0.04 | 0.00 | 39.85 | 47.65 | 48.58 | 50.74 | 35.41 | | 1 | 40 | 922.00 | 0.32 | 10.93 | 15.74 | 3.51 | 11.94 | 0.04 | 0.00 | 43.51 | 64.11 | 67.80 | 72.25 | 42.48 | | 1 | 48 | 925.00 | 0.29 | 18.18 | 12.30 | 5.05 | 15.26 | 0.06 | 0.00 | 62.62 | 65.16 | 65.56 | 67.93 | 51.14 | | 1 | 56 | 947.00 | 0.31 | 16.32 | 20.65 | 5.34 | 15.38 | 0.06 | 0.00 | 61.82 | 73.49 | 78.44 | 81.74 | 58.06 | | 1 | 64 | 941.00 | 0.26 | 20.09 | 20.02 | 5.87 | 18.87 | 0.07 | 0.00 | 72.07 | 82.85 | 85.56 | 95.01 | 65.17 | | 1 | 72 | 972.00 | 0.31 | 22.91 | 21.08 | 7.07 | 21.14 | 0.08 | 0.00 | 81.38 | 97.68 | 98.61 | 99.52 | 72.59 | | 1 | 80 | 942.00 | 0.26 | 25.08 | 25.34 | 7.85 | 22.30 | 0.08 | 0.00 | 93.11 | 105.75 | 107.86 | 108.66 | 80.90 | | 1 | 88 | 957.00 | 0.36 | 22.82 | 31.03 | 8.55 | 24.84 | 0.08 | 0.00 | 93.79 | 111.73 | 115.51 | 130.56 | 87.68 | | 1 | 96 | 935.00 | 0.48 | 19.96 | 36.06 | 10.40 | 28.40 | 0.08 | 0.00 | 105.06 | 121.62 | 124.43 | 130.20 | 95.38 | | 1 | 104 | 963.00 | 0.53 | 19.26 | 37.98 | 11.56 | 32.53 | 0.10 | 0.00 | 107.48 | 134.90 | 142.31 | 148.30 | 101.96 | | 1 | 112 | 978.00 | 0.48 | 21.18 | 44.75 | 9.26 | 28.77 | 0.08 | 0.00 | 107.36 | 133.26 | 146.77 | 149.03 | 104.53 | | 1 | 120 | 969.00 | 0.39 | 23.07 | 43.39 | 10.69 | 33.87 | 0.10 | 0.00 | 118.78 | 138.81 | 153.00 | 155.47 | 111.52 | | 1 | 128 | 973.00 | 0.36 | 39.72 | 32.80 | 14.85 | 38.92 | 0.12 | 0.00 | 144.51 | 153.19 | 154.08 | 157.04 | 126.77 | | 1 | 136 | 947.00 | 0.52 | 21.72 | 48.03 | 14.27 | 42.88 | 0.13 | 0.00 | 124.35 | 170.72 | 175.54 | 176.25 | 127.56 | | 1 | 144 | 938.00 | 0.46 | 25.39 | 49.73 | 17.86 | 47.05 | 0.13 | 0.00 | 177.81 | 182.01 | 183.39 | 183.77 | 140.62 | | 1 | 152 | 988.00 | 0.88 | 22.59 | 64.36 | 14.08 | 38.35 | 0.11 | 0.00 | 138.49 | 167.03 | 171.27 | 181.38 | 140.36 | | 1 | 160 | 955.00 | 0.37 | 40.02 | 49.30 | 16.71 | 45.36 | 0.13 | 0.00 | 165.80 | 195.73 | 201.11 | 202.00 | 151.89 | | 1 | 168 | 
996.00 | 0.45 | 33.74 | 57.75 | 15.81 | 44.01 | 0.13 | 0.00 | 153.19 | 184.83 | 198.88 | 199.72 | 151.88 | | 1 | 176 | 1039.00 | 0.44 | 23.42 | 72.30 | 14.80 | 45.83 | 0.13 | 0.00 | 153.21 | 189.59 | 210.38 | 220.08 | 156.92 | | 1 | 184 | 944.00 | 0.43 | 35.13 | 70.02 | 17.09 | 50.08 | 0.13 | 0.00 | 184.89 | 227.64 | 234.29 | 234.97 | 172.87 | | 1 | 192 | 970.00 | 0.50 | 29.45 | 71.59 | 18.09 | 56.22 | 0.12 | 0.00 | 174.53 | 232.46 | 242.64 | 244.82 | 175.98 | | 1 | 200 | 982.00 | 0.79 | 21.46 | 84.92 | 19.58 | 57.29 | 0.15 | 0.00 | 181.26 | 239.74 | 240.14 | 242.91 | 184.18 | | 1 | 208 | 1040.00 | 0.44 | 40.28 | 71.11 | 18.28 | 56.21 | 0.15 | 0.00 | 195.54 | 227.27 | 233.56 | 259.94 | 186.47 | | 1 | 216 | 932.00 | 0.61 | 29.16 | 89.66 | 20.97 | 57.24 | 0.23 | 0.00 | 199.10 | 244.75 | 257.94 | 288.15 | 197.87 | | 1 | 224 | 1036.00 | 0.36 | 36.80 | 80.99 | 17.31 | 58.04 | 0.15 | 0.00 | 196.15 | 235.40 | 240.68 | 254.10 | 193.65 | | 1 | 232 | 1033.00 | 0.43 | 36.77 | 101.26 | 15.44 | 45.74 | 0.12 | 0.00 | 209.51 | 230.41 | 240.17 | 247.71 | 199.75 | | 1 | 240 | 908.00 | 0.62 | 32.32 | 105.65 | 23.40 | 63.21 | 0.16 | 0.00 | 225.70 | 253.95 | 258.04 | 258.47 | 225.36 | | 1 | 248 | 992.00 | 0.39 | 42.24 | 99.04 | 21.39 | 60.67 | 0.18 | 0.00 | 226.01 | 264.17 | 311.15 | 328.45 | 223.90 | | 1 | 256 | 1012.00 | 0.37 | 48.91 | 94.14 | 20.70 | 59.91 | 0.19 | 0.00 | 225.17 | 275.34 | 300.56 | 303.33 | 224.22 | </details> ## Advanced | Inference runtime | Mnemonic used in scripts | |-------------------|--------------------------| | [TorchScript Tracing](https://pytorch.org/docs/stable/jit.html) | `ts-trace` | | [TorchScript Scripting](https://pytorch.org/docs/stable/jit.html) | `ts-script` | | [ONNX](https://onnx.ai) | `onnx` | | [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` | ### Step by step deployment process Commands described below can be used for exporting, converting and profiling the model. #### Clone Repository IMPORTANT: This step is executed on the host computer. <details> <summary>Clone Repository Command</summary> ```shell git clone https://github.com/NVIDIA/DeepLearningExamples.git cd PyTorch/Classification/GPUNet ``` </details> #### Start Triton Inference Server Setup the environment in the host computer and start Triton Inference Server. <details> <summary>Setup Environment and Start Triton Inference Server Command</summary> ```shell source ./triton/scripts/setup_environment.sh ./triton/scripts/docker/triton_inference_server.sh ``` </details> #### Prepare Dataset. Please use the data download from the [Main QSG](../../README.md#prepare-the-dataset) #### Prepare Checkpoint Please download a checkpoint from [here](https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_d2_pyt_ckpt/versions/21.12.0_amp/zip) and place it in `runner_workspace/checkpoints/2.25ms-D/`. Note that the `2.25ms-D` subdirectory may not be created yet. #### Setup Container Build and run a container that extends the NGC PyTorch container with the Triton Inference Server client libraries and dependencies. <details> <summary>Setup Container Command</summary> Build container: ```shell ./triton/scripts/docker/build.sh ``` Run container in interactive mode: ```shell ./triton/scripts/docker/interactive.sh /path/to/imagenet/val/ ``` Setup environment in order to share artifacts in steps and with Triton Inference Server: ```shell source ./triton/scripts/setup_environment.sh ``` </details> #### Prepare configuration You can use the environment variables to set the parameters of your inference configuration. 
Example values of some key variables in one configuration: <details> <summary>Export Variables</summary> ```shell export FORMAT="onnx" export PRECISION="fp16" export EXPORT_FORMAT="onnx" export EXPORT_PRECISION="fp16" export BACKEND_ACCELERATOR="trt" export NUMBER_OF_MODEL_INSTANCES="2" export TENSORRT_CAPTURE_CUDA_GRAPH="0" export CHECKPOINT="2.25ms-D" export CHECKPOINT_DIR=${CHECKPOINTS_DIR}/${CHECKPOINT} ``` </details> #### Export Model Export the model from Python source to the desired format (e.g. SavedModel or TorchScript). <details> <summary>Export Model Command</summary> ```shell if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then export FORMAT_SUFFIX="pt" else export FORMAT_SUFFIX="${EXPORT_FORMAT}" fi python3 triton/export_model.py \ --input-path triton/model.py \ --input-type pyt \ --output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-type ${EXPORT_FORMAT} \ --ignore-unknown-parameters \ --onnx-opset 13 \ --torch-jit none \ \ --config /workspace/gpunet/configs/batch1/GV100/2.25ms-D.json \ --checkpoint ${CHECKPOINT_DIR}/2.25ms-D.pth.tar \ --precision ${EXPORT_PRECISION} \ \ --dataloader triton/dataloader.py \ --val-path ${DATASETS_DIR}/ \ --is-prunet False \ --batch-size 1 ``` </details> #### Convert Model Convert the model from training to inference format (e.g. TensorRT). <details> <summary>Convert Model Command</summary> ```shell if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then export FORMAT_SUFFIX="pt" else export FORMAT_SUFFIX="${EXPORT_FORMAT}" fi model-navigator convert \ --model-name ${MODEL_NAME} \ --model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-path ${SHARED_DIR}/converted_model \ --target-formats ${FORMAT} \ --target-precisions ${PRECISION} \ --launch-mode local \ --override-workspace \ --verbose \ \ --onnx-opsets 13 \ --max-batch-size 64 \ --container-version 21.12 \ --max-workspace-size 10000000000 \ --atol OUTPUT__0=100 \ --rtol OUTPUT__0=100 ``` </details> #### Deploy Model Configure the model on Triton Inference Server. Generate the configuration from your model repository. <details> <summary>Deploy Model Command</summary> ```shell model-navigator triton-config-model \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --model-version 1 \ --model-path ${SHARED_DIR}/converted_model \ --model-format ${FORMAT} \ --model-control-mode explicit \ --load-model \ --load-model-timeout-s 100 \ --verbose \ \ --backend-accelerator ${BACKEND_ACCELERATOR} \ --tensorrt-precision ${PRECISION} \ --tensorrt-capture-cuda-graph \ --tensorrt-max-workspace-size 10000000000 \ --max-batch-size 64 \ --batching dynamic \ --preferred-batch-sizes 64 \ --engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES} ``` </details> #### Triton Performance Offline Test We want to maximize throughput. This scenario assumes that your data is already available for inference, or that it arrives quickly enough to saturate the maximum batch size. Triton Inference Server supports offline scenarios with static batching. Static batching allows inference requests to be served as they are received. The largest improvements to throughput come from increasing the batch size due to efficiency gains in the GPU with larger batches.
<details> <summary>Triton Performance Offline Test Command</summary> ```shell python triton/run_performance_on_triton.py \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --input-data random \ --batch-sizes 1 2 4 8 16 32 64 \ --concurrency 1 \ --evaluation-mode offline \ --measurement-request-count 10 \ --warmup \ --performance-tool perf_analyzer \ --result-path ${SHARED_DIR}/triton_performance_offline.csv ``` </details> #### Triton Performance Online Test We want to maximize throughput within latency budget constraints. Dynamic batching is a feature of Triton Inference Server that allows inference requests to be combined by the server, so that a batch is created dynamically, resulting in a reduced average latency. <details> <summary>Triton Performance Online Test</summary> ```shell python triton/run_performance_on_triton.py \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --input-data random \ --batch-sizes 1 \ --concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \ --evaluation-mode online \ --measurement-request-count 500 \ --warmup \ --performance-tool perf_analyzer \ --result-path ${SHARED_DIR}/triton_performance_online.csv ``` </details> ### Latency explanation A typical Triton Inference Server pipeline can be broken down into the following steps: 1. The client serializes the inference request into a message and sends it to the server (Client Send). 2. The message travels over the network from the client to the server (Network). 3. The message arrives at the server and is deserialized (Server Receive). 4. The request is placed on the queue (Server Queue). 5. The request is removed from the queue and computed (Server Compute). 6. The completed request is serialized in a message and sent back to the client (Server Send). 7. The completed message then travels over the network from the server to the client (Network). 8. The completed message is deserialized by the client and processed as a completed inference request (Client Receive). Generally, for local clients, steps 1-4 and 6-8 will only occupy a small fraction of time, compared to step 5. In distributed systems and online processing, where the client and server are connected through a network, the send and receive steps might have an impact on overall processing performance. To help analyze possible bottlenecks, detailed charts are presented for the online scenario cases. A minimal client-side sketch of steps 1 and 8 is provided at the end of this document. ## Release Notes We’re constantly refining and improving our performance on AI and HPC workloads even on the same hardware with frequent updates to our software stack. For our latest performance data refer to these pages for [AI](https://developer.nvidia.com/deep-learning-performance-training-inference) and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks. ### Changelog May 2022 - Initial release ### Known issues - There are no known issues with this model.
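### Appendix: Minimal client request sketch

The sketch below illustrates steps 1 and 8 of the latency breakdown above (client-side request serialization and response deserialization) using the Triton HTTP client. It is not part of the deployment scripts: the server address, the model name, the tensor names `INPUT__0`/`OUTPUT__0`, and the input shape and precision are assumptions that must be adjusted to your deployed model configuration.

```python
# Minimal synchronous inference request against a running Triton Inference Server.
# The URL, model name, tensor names, shape, and dtype below are illustrative placeholders.
import numpy as np
import tritonclient.http as httpclient

MODEL_NAME = "GPUnet"           # assumption: substitute the ${MODEL_NAME} used at deployment
INPUT_SHAPE = (1, 3, 224, 224)  # assumption: substitute the input resolution of your variant

client = httpclient.InferenceServerClient(url="localhost:8000")

# Random payload, mirroring the --input-data random option of the performance scripts.
batch = np.random.rand(*INPUT_SHAPE).astype(np.float16)
infer_input = httpclient.InferInput("INPUT__0", list(batch.shape), "FP16")
infer_input.set_data_from_numpy(batch)

# Client Send -> Network -> Server Receive/Queue/Compute/Send -> Network -> Client Receive.
result = client.infer(
    model_name=MODEL_NAME,
    inputs=[infer_input],
    outputs=[httpclient.InferRequestedOutput("OUTPUT__0")],
)
logits = result.as_numpy("OUTPUT__0")
print(logits.shape)
```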
TensorFlow2/Detection/Efficientdet/model
model
callback_builder
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Callback related utils.""" from concurrent import futures import os from mpi4py import MPI import time import numpy as np import tensorflow as tf import horovod.tensorflow.keras.callbacks as hvd_callbacks from tensorflow_addons.optimizers import MovingAverage from typeguard import typechecked from typing import Any, List, MutableMapping, Text from model import inference, optimizer_builder from utils import model_utils from model import efficientdet_keras, coco_metric, label_util, postprocess from utils.horovod_utils import get_world_size, is_main_process class DisplayCallback(tf.keras.callbacks.Callback): """Display inference result callback.""" def __init__(self, sample_image, output_dir, update_freq=1): super().__init__() image_file = tf.io.read_file(sample_image) self.sample_image = tf.expand_dims( tf.image.decode_jpeg(image_file, channels=3), axis=0) self.executor = futures.ThreadPoolExecutor(max_workers=1) self.update_freq = update_freq self.output_dir = output_dir def set_model(self, model: tf.keras.Model): self.train_model = model with tf.device('/cpu:0'): self.model = efficientdet_keras.EfficientDetModel(config=model.config) height, width = model_utils.parse_image_size(model.config.image_size) self.model.build((1, height, width, 3)) self.file_writer = tf.summary.create_file_writer(self.output_dir) self.min_score_thresh = self.model.config.nms_configs['score_thresh'] or 0.4 self.max_boxes_to_draw = ( self.model.config.nms_configs['max_output_size'] or 100) def on_epoch_end(self, epoch, logs=None): if epoch % self.update_freq == 0: self.executor.submit(self.draw_inference, epoch) @tf.function def inference(self): return self.model(self.sample_image, training=False) def draw_inference(self, epoch): self.model.set_weights(self.train_model.get_weights()) boxes, scores, classes, valid_len = self.inference() length = valid_len[0] image = inference.visualize_image( self.sample_image[0], boxes[0].numpy()[:length], classes[0].numpy().astype(np.int)[:length], scores[0].numpy()[:length], label_map=self.model.config.label_map, min_score_thresh=self.min_score_thresh, max_boxes_to_draw=self.max_boxes_to_draw) with self.file_writer.as_default(): tf.summary.image('Test image', tf.expand_dims(image, axis=0), step=epoch) class BatchTimestamp(object): """A structure to store batch time stamp.""" def __init__(self, batch_index, timestamp): self.batch_index = batch_index self.timestamp = timestamp def __repr__(self): return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format( self.batch_index, self.timestamp) class TimeHistory(tf.keras.callbacks.Callback): """Callback for Keras models.""" def __init__(self, batch_size, logger, log_steps=1, logdir=None): """Callback for logging performance. Args: batch_size: Total batch size. log_steps: Interval of steps between logging of batch level stats. 
logdir: Optional directory to write TensorBoard summaries. """ # TODO(wcromar): remove this parameter and rely on `logs` parameter of # on_train_batch_end() self.batch_size = batch_size super(TimeHistory, self).__init__() self.log_steps = log_steps self.last_log_step = 0 self.steps_before_epoch = 0 self.steps_in_epoch = 0 self.start_time = None self.logger = logger self.step_per_epoch = 0 if logdir: self.summary_writer = tf.summary.create_file_writer(logdir) else: self.summary_writer = None # Logs start of step 1 then end of each step based on log_steps interval. self.timestamp_log = [] # Records the time each epoch takes to run from start to finish of epoch. self.epoch_runtime_log = [] self.latency = [] self.throughput = [] @property def global_steps(self): """The current 1-indexed global step.""" return self.steps_before_epoch + self.steps_in_epoch @property def average_steps_per_second(self): """The average training steps per second across all epochs.""" return (self.global_steps - self.step_per_epoch) / sum(self.epoch_runtime_log[1:]) @property def average_examples_per_second(self): """The average number of training examples per second across all epochs.""" # return self.average_steps_per_second * self.batch_size ind = int(0.1*len(self.throughput)) return sum(self.throughput[ind:])/(len(self.throughput[ind:])) @property def average_time_per_iteration(self): """The average time per iteration in seconds across all epochs.""" ind = int(0.1*len(self.latency)) return sum(self.latency[ind:])/(len(self.latency[ind:])) def on_train_end(self, logs=None): self.train_finish_time = time.time() if self.summary_writer: self.summary_writer.flush() def on_epoch_begin(self, epoch, logs=None): self.epoch_start = time.time() def on_batch_begin(self, batch, logs=None): if not self.start_time: self.start_time = time.time() # Record the timestamp of the first global step if not self.timestamp_log: self.timestamp_log.append(BatchTimestamp(self.global_steps, self.start_time)) def on_batch_end(self, batch, logs=None): """Records elapse time of the batch and calculates examples per second.""" self.steps_in_epoch = batch + 1 steps_since_last_log = self.global_steps - self.last_log_step if steps_since_last_log >= self.log_steps: now = time.time() elapsed_time = now - self.start_time steps_per_second = steps_since_last_log / elapsed_time examples_per_second = steps_per_second * self.batch_size self.timestamp_log.append(BatchTimestamp(self.global_steps, now)) elapsed_time_str='{:.2f} seconds'.format(elapsed_time) self.logger.log(step='PARAMETER', data={'Latency': elapsed_time_str, 'fps': examples_per_second, 'steps': (self.last_log_step, self.global_steps)}) self.logger.flush() if self.summary_writer: with self.summary_writer.as_default(): tf.summary.scalar('global_step/sec', steps_per_second, self.global_steps) tf.summary.scalar('examples/sec', examples_per_second, self.global_steps) self.last_log_step = self.global_steps self.start_time = None self.latency.append(elapsed_time) self.throughput.append(examples_per_second) def on_epoch_end(self, epoch, logs=None): if epoch == 0: self.step_per_epoch = self.steps_in_epoch epoch_run_time = time.time() - self.epoch_start self.epoch_runtime_log.append(epoch_run_time) self.steps_before_epoch += self.steps_in_epoch self.steps_in_epoch = 0 class LRTensorBoard(tf.keras.callbacks.Callback): def __init__(self, log_dir, **kwargs): super().__init__(**kwargs) self.summary_writer = tf.summary.create_file_writer(log_dir) self.steps_before_epoch = 0 self.steps_in_epoch = 0 @property 
def global_steps(self): """The current 1-indexed global step.""" return self.steps_before_epoch + self.steps_in_epoch def on_batch_end(self, batch, logs=None): self.steps_in_epoch = batch + 1 lr = self.model.optimizer.lr(self.global_steps) with self.summary_writer.as_default(): summary = tf.summary.scalar('learning_rate', lr, self.global_steps) def on_epoch_end(self, epoch, logs=None): self.steps_before_epoch += self.steps_in_epoch self.steps_in_epoch = 0 def on_train_end(self, logs=None): self.summary_writer.flush() class LoggingCallback(tf.keras.callbacks.Callback): def on_train_batch_end(self, batch, logs=None): print("Iter: {}".format(batch)) for var in self.model.variables: # if 'dense' in var.name: # continue print("Var: {} {}".format(var.name, var.value)) try: slot = self.model.optimizer.get_slot(var, "average") print("Avg: {}".format(slot)) except KeyError as e: print("{} does not have ema average slot".format(var.name)) def fetch_optimizer(model,opt_type) -> tf.keras.optimizers.Optimizer: """Get the base optimizer used by the current model.""" # this is the case where our target optimizer is not wrapped by any other optimizer(s) if isinstance(model.optimizer,opt_type): return model.optimizer # Dive into nested optimizer object until we reach the target opt opt = model.optimizer while hasattr(opt, '_optimizer'): opt = opt._optimizer if isinstance(opt,opt_type): return opt raise TypeError(f'Failed to find {opt_type} in the nested optimizer object') class MovingAverageCallback(tf.keras.callbacks.Callback): """A Callback to be used with a `MovingAverage` optimizer. Applies moving average weights to the model during validation time to test and predict on the averaged weights rather than the current model weights. Once training is complete, the model weights will be overwritten with the averaged weights (by default). Attributes: overwrite_weights_on_train_end: Whether to overwrite the current model weights with the averaged weights from the moving average optimizer. **kwargs: Any additional callback arguments. """ def __init__(self, overwrite_weights_on_train_end: bool = False, **kwargs): super(MovingAverageCallback, self).__init__(**kwargs) self.overwrite_weights_on_train_end = overwrite_weights_on_train_end self.ema_opt = None def set_model(self, model: tf.keras.Model): super(MovingAverageCallback, self).set_model(model) self.ema_opt = fetch_optimizer(model, MovingAverage) self.ema_opt.shadow_copy(self.model.weights) def on_test_begin(self, logs: MutableMapping[Text, Any] = None): self.ema_opt.swap_weights() def on_test_end(self, logs: MutableMapping[Text, Any] = None): self.ema_opt.swap_weights() def on_train_end(self, logs: MutableMapping[Text, Any] = None): if self.overwrite_weights_on_train_end: self.ema_opt.assign_average_vars(self.model.variables) class AverageModelCheckpoint(tf.keras.callbacks.ModelCheckpoint): """Saves and, optionally, assigns the averaged weights. Taken from tfa.callbacks.AverageModelCheckpoint [original class]. NOTE1: The original class has a type check decorator, which prevents passing non-string save_freq (fix: removed) NOTE2: The original class may not properly handle layered (nested) optimizer objects (fix: use fetch_optimizer) Attributes: update_weights: If True, assign the moving average weights to the model, and save them. If False, keep the old non-averaged weights, but the saved model uses the average weights. See `tf.keras.callbacks.ModelCheckpoint` for the other args. 
""" def __init__( self, update_weights: bool, filepath: str, monitor: str = 'val_loss', verbose: int = 0, save_best_only: bool = False, save_weights_only: bool = False, mode: str = 'auto', save_freq: str = 'epoch', **kwargs): super().__init__( filepath, monitor, verbose, save_best_only, save_weights_only, mode, save_freq, **kwargs) self.update_weights = update_weights self.ema_opt = None def set_model(self, model): self.ema_opt = fetch_optimizer(model, MovingAverage) return super().set_model(model) def _save_model(self, epoch, batch, logs): assert isinstance(self.ema_opt, MovingAverage) if self.update_weights: self.ema_opt.assign_average_vars(self.model.variables) return super()._save_model(epoch, batch, logs) else: # Note: `model.get_weights()` gives us the weights (non-ref) # whereas `model.variables` returns references to the variables. non_avg_weights = self.model.get_weights() self.ema_opt.assign_average_vars(self.model.variables) # result is currently None, since `super._save_model` doesn't # return anything, but this may change in the future. result = super()._save_model(epoch, batch, logs) self.model.set_weights(non_avg_weights) return result class StopEarlyCallback(tf.keras.callbacks.Callback): def __init__(self, num_epochs, stop_75, **kwargs): super(StopEarlyCallback, self).__init__(**kwargs) self.num_epochs = num_epochs self.stop_75 = stop_75 def on_epoch_end(self, epoch, logs=None): if ((epoch + 1) > (0.75 * self.num_epochs) and self.stop_75) or ((epoch + 1) == 300): self.model.stop_training = True class COCOEvalCallback(tf.keras.callbacks.Callback): def __init__(self, eval_dataset, eval_freq, start_eval_epoch, eval_params, logger, **kwargs): super(COCOEvalCallback, self).__init__(**kwargs) self.dataset = eval_dataset self.eval_freq = eval_freq self.start_eval_epoch = start_eval_epoch self.eval_params = eval_params self.ema_opt = None self.logger = logger label_map = label_util.get_label_map(eval_params['label_map']) self.evaluator = coco_metric.EvaluationMetric( filename=eval_params['val_json_file'], label_map=label_map) self.pbar = tf.keras.utils.Progbar(eval_params['num_samples']) def set_model(self, model): self.ema_opt = fetch_optimizer(model, MovingAverage) return super().set_model(model) @tf.function def eval_model_fn(self, images, labels): cls_outputs, box_outputs = self.model(images, training=False) detections = postprocess.generate_detections(self.eval_params, cls_outputs, box_outputs, labels['image_scales'], labels['source_ids']) tf.numpy_function(self.evaluator.update_state, [labels['groundtruth_data'], postprocess.transform_detections(detections)], []) def evaluate(self, epoch): if self.eval_params['moving_average_decay'] > 0: self.ema_opt.swap_weights() # get ema weights self.evaluator.reset_states() # evaluate all images. for i, (images, labels) in enumerate(self.dataset): self.eval_model_fn(images, labels) if is_main_process(): self.pbar.update(i) # gather detections from all ranks self.evaluator.gather() # compute the final eval results. 
if is_main_process(): metrics = self.evaluator.result() metric_dict = {} for i, name in enumerate(self.evaluator.metric_names): metric_dict[name] = metrics[i] # csv format csv_metrics = ['AP','AP50','AP75','APs','APm','APl'] csv_format = ",".join([str(epoch+1)] + [str(round(metric_dict[key] * 100, 2)) for key in csv_metrics]) print(metric_dict, "csv format:", csv_format) self.logger.log(step=(), data={'epoch': epoch+1, 'validation_accuracy_mAP': round(metric_dict['AP'] * 100, 2)}) if self.eval_params['moving_average_decay'] > 0: self.ema_opt.swap_weights() # get base weights MPI.COMM_WORLD.Barrier() def on_epoch_end(self, epoch, logs=None): if (epoch + 1) >= self.start_eval_epoch and (epoch + 1) % self.eval_freq == 0: self.evaluate(epoch) def get_callbacks( params, training_mode, eval_params, eval_dataset, logger, time_history=True, log_steps=1, lr_tb=True, benchmark=False ): """Get callbacks for given params.""" callbacks = [] if is_main_process(): if benchmark == False: tb_callback = tf.keras.callbacks.TensorBoard( log_dir=params['model_dir'], profile_batch=0, histogram_freq = 1) callbacks.append(tb_callback) if params['moving_average_decay']: emackpt_callback = AverageModelCheckpoint( filepath=os.path.join(params['model_dir'], 'ema_weights', 'emackpt-{epoch:02d}'), update_weights=False, amp=params['mixed_precision'], verbose=1, save_freq='epoch', save_weights_only=True, period=params['checkpoint_period']) callbacks.append(emackpt_callback) ckpt_callback = tf.keras.callbacks.ModelCheckpoint( os.path.join(params['model_dir'], 'ckpt'), verbose=1, save_freq='epoch', save_weights_only=True, period=params['checkpoint_period']) callbacks.append(ckpt_callback) if time_history: time_callback = TimeHistory(params['batch_size'] * get_world_size(), logger=logger, logdir=params['model_dir'], log_steps=log_steps) callbacks.append(time_callback) # log LR in tensorboard if lr_tb == True and benchmark == False: callbacks.append(LRTensorBoard(log_dir=params['model_dir'])) hvd_callback = hvd_callbacks.BroadcastGlobalVariablesCallback(0) callbacks.append(hvd_callback) # for large batch sizes training schedule of 350/400 epochs gives better mAP # but the best mAP is generally reached after 75% of the training schedule. # So we can stop training at that point or continue to train until 300 epochs stop_75 = False if 'eval' in training_mode or '300' in training_mode else True early_stopping = StopEarlyCallback(params['num_epochs'], stop_75=stop_75) callbacks.append(early_stopping) if 'eval' in training_mode: cocoeval = COCOEvalCallback(eval_dataset, eval_freq=params['checkpoint_period'], start_eval_epoch=200, eval_params=eval_params, logger=logger) callbacks.append(cocoeval) if params['moving_average_decay']: callbacks.append(MovingAverageCallback()) if params.get('sample_image', None): display_callback = DisplayCallback( params.get('sample_image', None), os.path.join(params['model_dir'], 'train')) callbacks.append(display_callback) return callbacks
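# ---------------------------------------------------------------------------
# Illustrative sketch (added for documentation, not part of the original file):
# fetch_optimizer() retrieves the MovingAverage instance from a compiled model,
# and also unwraps nested optimizer stacks by following the `_optimizer`
# attribute. The tiny model and SGD base optimizer below are made up for this
# example and do not describe the actual EfficientDet training setup.
if __name__ == "__main__":
    base_opt = tf.keras.optimizers.SGD(learning_rate=0.1)
    ema_opt = MovingAverage(base_opt, average_decay=0.9999)

    demo_model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    demo_model.compile(optimizer=ema_opt, loss="mse")

    # Returns the MovingAverage wrapper itself, which callbacks such as
    # MovingAverageCallback and AverageModelCheckpoint rely on.
    found = fetch_optimizer(demo_model, MovingAverage)
    print(found is ema_opt)  # -> True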
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2
tacotron2
tacotron2Builder
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_TACOTRON2BUILDER_H #define TT2I_TACOTRON2BUILDER_H #include "tacotron2Instance.h" #include <NvInfer.h> #include <memory> #include <string> #include <vector> namespace nvinfer1 { class ICudaEngine; class IBuilder; } // namespace nvinfer1 namespace tts { class Tacotron2Builder { public: /** * @brief Create a new tacotron2 builder. * * @param modelFilePath The path to the tacotron2 jit model to load weights * from. */ Tacotron2Builder(const std::string& modelFilepath); /** * @brief Build the set of engines for Tacotron2. * * @param maxInputLength The maximum input length. * @param builder The builder to use. * @param maxBatchSize The maximum batch size to build the engines for. * @param useFP16 whether or not to allow FP16. * * @return The build engines. */ std::vector<TRTPtr<nvinfer1::ICudaEngine>> build( int maxInputLength, nvinfer1::IBuilder& builder, const int maxBatchSize, const bool useFP16); private: std::string mModelFilePath; int mMelChannels; }; } // namespace tts #endif
PyTorch/LanguageModeling/BERT/triton/dist4l/runner
runner
config_NVIDIA-DGX-A100-(1x-A100-80GB)
checkpoints: - name: dist-4l-qa url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/bert_pyt_ckpt_distilled_4l_288d_qa_squad11_amp/versions/21.11.0/zip configurations: - accelerator: none accelerator_precision: fp16 batch_size: - 1 batch_sizes: '1' capture_cuda_graph: 0 checkpoint_variant: dist-4l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 1 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: '1' - accelerator: none accelerator_precision: fp16 batch_size: - 16 batch_sizes: '16' capture_cuda_graph: 0 checkpoint_variant: dist-4l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 - accelerator: none accelerator_precision: fp16 batch_size: - 8 batch_sizes: '8' capture_cuda_graph: 0 checkpoint_variant: dist-4l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 8 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 4 8 - accelerator: trt accelerator_precision: fp16 batch_size: - 1 batch_sizes: '1' capture_cuda_graph: 0 checkpoint_variant: dist-4l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 1 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: '1' - accelerator: trt accelerator_precision: fp16 batch_size: - 16 batch_sizes: '16' capture_cuda_graph: 0 checkpoint_variant: dist-4l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 - accelerator: trt accelerator_precision: fp16 batch_size: - 8 batch_sizes: '8' capture_cuda_graph: 0 checkpoint_variant: dist-4l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 8 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 4 8 - accelerator: none accelerator_precision: fp16 batch_size: - 1 batch_sizes: '1' capture_cuda_graph: 0 checkpoint_variant: dist-4l-qa export_format: onnx export_precision: fp16 format: trt max_batch_size: 1 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: '1' - accelerator: none accelerator_precision: fp16 batch_size: - 16 batch_sizes: '16' capture_cuda_graph: 0 checkpoint_variant: dist-4l-qa export_format: onnx export_precision: fp16 format: trt max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 - accelerator: none accelerator_precision: fp16 batch_size: - 8 batch_sizes: '8' capture_cuda_graph: 0 checkpoint_variant: dist-4l-qa export_format: onnx export_precision: fp16 format: trt max_batch_size: 8 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 4 8 - accelerator: none accelerator_precision: fp16 batch_size: - 1 - 8 - 16 batch_sizes: 1 8 16 capture_cuda_graph: 0 checkpoint_variant: dist-4l-qa export_format: ts-trace export_precision: fp16 format: ts-trace max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 container_version: '21.10' datasets: - name: 
data datasets_dir: datasets framework: PyTorch model_name: BERT triton_container_image: null triton_custom_operations: null triton_dockerfile: null triton_load_model_method: explicit
CUDA-Optimized/FastSpeech/fastspeech/inferencer
inferencer
waveglow_inferencer
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import sys import torch from fastspeech.utils.logging import tprint from fastspeech.utils.pytorch import to_cpu_numpy, to_device_async from fastspeech.inferencer.denoiser import Denoiser from waveglow.model import WaveGlow import argparse def unwrap_distributed(state_dict): """ Unwraps model from DistributedDataParallel. DDP wraps model in additional "module.", it needs to be removed for single GPU inference. 
:param state_dict: model's state dict """ new_state_dict = {} for key, value in state_dict.items(): new_key = key.replace('module.', '') new_state_dict[new_key] = value return new_state_dict class WaveGlowInferencer(object): def __init__(self, ckpt_file, device='cuda', use_fp16=False, use_denoiser=False): self.ckpt_file = ckpt_file self.device = device self.use_fp16 = use_fp16 self.use_denoiser = use_denoiser # model # sys.path.append('waveglow') from waveglow.arg_parser import parse_waveglow_args parser = argparse.ArgumentParser() model_parser = parse_waveglow_args(parser) args, _ = model_parser.parse_known_args() model_config = dict( n_mel_channels=args.n_mel_channels, n_flows=args.flows, n_group=args.groups, n_early_every=args.early_every, n_early_size=args.early_size, WN_config=dict( n_layers=args.wn_layers, kernel_size=args.wn_kernel_size, n_channels=args.wn_channels ) ) self.model = WaveGlow(**model_config) state_dict = torch.load(self.ckpt_file, map_location=self.device)['state_dict'] state_dict = unwrap_distributed(state_dict) self.model.load_state_dict(state_dict) self.model = to_device_async(self.model, self.device) self.model = self.model.remove_weightnorm(self.model) self.model.eval() if self.use_fp16: self.model = self.model.half() if self.use_denoiser: self.denoiser = Denoiser(self.model, device=device) self.denoiser = to_device_async(self.denoiser, self.device) tprint('Using WaveGlow denoiser.') def __enter__(self): pass def __exit__(self, exception_type, exception_value, traceback): pass def infer(self, mels): if self.use_fp16: mels = mels.half() mels = to_device_async(mels, self.device) wavs = self.model.infer(mels, sigma=0.6) if self.use_denoiser: wavs = self.denoiser(wavs, strength=0.01) return wavs.float()
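# ---------------------------------------------------------------------------
# Usage sketch (added for documentation, not part of the original file): vocode
# a batch of mel spectrograms with a trained WaveGlow checkpoint. The checkpoint
# path is a placeholder, and the mel shape assumes the default 80 mel channels.
if __name__ == "__main__":
    inferencer = WaveGlowInferencer(
        ckpt_file="path/to/waveglow_checkpoint.pt",  # placeholder path
        device="cuda",
        use_fp16=True,
        use_denoiser=True,
    )
    mels = torch.randn(1, 80, 200)  # [batch, n_mel_channels, frames]
    with torch.no_grad():
        wavs = inferencer.infer(mels)
    print(wavs.shape)  # [batch, num_samples]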
PyTorch/Segmentation/nnUNet/utils
utils
logger
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time import dllogger as logger import numpy as np import torch from dllogger import JSONStreamBackend, StdOutBackend, Verbosity from pytorch_lightning import Callback from pytorch_lightning.utilities import rank_zero_only class DLLogger: def __init__(self, log_dir, filename, append=True): super().__init__() self._initialize_dllogger(log_dir, filename, append) @rank_zero_only def _initialize_dllogger(self, log_dir, filename, append): backends = [ JSONStreamBackend(Verbosity.VERBOSE, os.path.join(log_dir, filename), append=append), StdOutBackend(Verbosity.VERBOSE), ] logger.init(backends=backends) @rank_zero_only def log_metrics(self, metrics, step=None): if step is None: step = () logger.log(step=step, data=metrics) @rank_zero_only def log_metadata(self, metric, metadata): logger.metadata(metric, metadata) @rank_zero_only def flush(self): logger.flush() class LoggingCallback(Callback): def __init__(self, log_dir, filnename, global_batch_size, mode, warmup, dim): self.dllogger = DLLogger(log_dir, filnename) self.warmup_steps = warmup self.global_batch_size = global_batch_size self.step = 0 self.dim = dim self.mode = mode self.timestamps = [] self.dllogger.log_metadata("dice_score", {"unit": None}) self.dllogger.log_metadata(f"throughput_{self.mode}", {"unit": "images/s"}) self.dllogger.log_metadata(f"latency_{self.mode}_mean", {"unit": "ms"}) for level in [90, 95, 99]: self.dllogger.log_metadata(f"latency_{self.mode}_{level}", {"unit": "ms"}) def do_step(self): if self.step > self.warmup_steps: self.step += 1 return torch.cuda.synchronize() self.timestamps.append(time.perf_counter()) def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): if trainer.current_epoch == 1: self.do_step() def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): if pl_module.start_benchmark == 1: self.do_step() def process_performance_stats(self): def _round3(val): return round(val, 3) elapsed_times = np.diff(self.timestamps) throughput_imgps = _round3(self.global_batch_size / np.mean(elapsed_times)) timestamps_ms = 1000 * elapsed_times stats = { f"throughput_{self.mode}": throughput_imgps, f"latency_{self.mode}_mean": _round3(np.mean(timestamps_ms)), } for level in [90, 95, 99]: stats.update({f"latency_{self.mode}_{level}": _round3(np.percentile(timestamps_ms, level))}) return stats @rank_zero_only def _log(self): stats = self.process_performance_stats() self.dllogger.log_metrics(metrics=stats) self.dllogger.flush() def on_train_end(self, trainer, pl_module): self._log() def on_test_end(self, trainer, pl_module): if pl_module.start_benchmark == 1: self._log()
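# ---------------------------------------------------------------------------
# Illustrative sketch (added for documentation, not part of the original file):
# minimal standalone use of the DLLogger wrapper above. The log directory, file
# name, and metric name are placeholders.
if __name__ == "__main__":
    log_dir = "/tmp/nnunet_logs"  # placeholder
    os.makedirs(log_dir, exist_ok=True)
    dll_logger = DLLogger(log_dir, "perf.json", append=False)
    # Declare the unit once, then log the metric per step.
    dll_logger.log_metadata("throughput_train", {"unit": "images/s"})
    for step in range(3):
        dll_logger.log_metrics({"throughput_train": 1000.0 + step}, step=step)
    dll_logger.flush()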
PyTorch/Forecasting/TFT/triton/deployment_toolkit
deployment_toolkit
extensions
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import logging import os import re from pathlib import Path from typing import List LOGGER = logging.getLogger(__name__) class ExtensionManager: def __init__(self, name: str): self._name = name self._registry = {} def register_extension(self, extension: str, clazz): already_registered_class = self._registry.get(extension, None) if already_registered_class and already_registered_class.__module__ != clazz.__module__: raise RuntimeError( f"Conflicting extension {self._name}/{extension}; " f"{already_registered_class.__module__}.{already_registered_class.__name} " f"and " f"{clazz.__module__}.{clazz.__name__}" ) elif already_registered_class is None: clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None" LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}") self._registry[extension] = clazz def get(self, extension): if extension not in self._registry: raise RuntimeError(f"Missing extension {self._name}/{extension}") return self._registry[extension] @property def supported_extensions(self): return list(self._registry) @staticmethod def scan_for_extensions(extension_dirs: List[Path]): register_pattern = r".*\.register_extension\(.*" for extension_dir in extension_dirs: for python_path in extension_dir.rglob("*.py"): if not python_path.is_file(): continue payload = python_path.read_text() if re.findall(register_pattern, payload): import_path = python_path.relative_to(toolkit_root_dir.parent) package = import_path.parent.as_posix().replace(os.sep, ".") package_with_module = f"{package}.{import_path.stem}" spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path) my_module = importlib.util.module_from_spec(spec) my_module.__package__ = package try: spec.loader.exec_module(my_module) # pytype: disable=attribute-error except ModuleNotFoundError as e: LOGGER.error( f"Could not load extensions from {import_path} due to missing python packages; {e}" ) runners = ExtensionManager("runners") loaders = ExtensionManager("loaders") savers = ExtensionManager("savers") converters = ExtensionManager("converters") toolkit_root_dir = (Path(__file__).parent / "..").resolve() ExtensionManager.scan_for_extensions([toolkit_root_dir])
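# ---------------------------------------------------------------------------
# Illustrative sketch (added for documentation, not part of the original file):
# how the registries declared above are meant to be used. An extension module
# registers its class under a mnemonic at import time, and client code later
# resolves it by that name. `DummyRunner` and the "dummy" mnemonic are made up
# for this example.
if __name__ == "__main__":

    class DummyRunner:
        """Placeholder runner used only to demonstrate registration."""

        def run(self):
            return "ok"

    # Normally executed at import time by a module under the scanned extension dirs.
    runners.register_extension("dummy", DummyRunner)

    # Client code resolves the implementation by its mnemonic.
    runner_cls = runners.get("dummy")
    print(runners.supported_extensions)  # includes "dummy"
    print(runner_cls().run())  # -> "ok"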
PyTorch/SpeechRecognition/Jasper/scripts
scripts
preprocess_librispeech
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/usr/bin/env bash SPEEDS=$1 [ -n "$SPEEDS" ] && SPEED_FLAG="--speed $SPEEDS" python ./utils/convert_librispeech.py \ --input_dir /datasets/LibriSpeech/train-clean-100 \ --dest_dir /datasets/LibriSpeech/train-clean-100-wav \ --output_json /datasets/LibriSpeech/librispeech-train-clean-100-wav.json \ $SPEED_FLAG python ./utils/convert_librispeech.py \ --input_dir /datasets/LibriSpeech/train-clean-360 \ --dest_dir /datasets/LibriSpeech/train-clean-360-wav \ --output_json /datasets/LibriSpeech/librispeech-train-clean-360-wav.json \ $SPEED_FLAG python ./utils/convert_librispeech.py \ --input_dir /datasets/LibriSpeech/train-other-500 \ --dest_dir /datasets/LibriSpeech/train-other-500-wav \ --output_json /datasets/LibriSpeech/librispeech-train-other-500-wav.json \ $SPEED_FLAG python ./utils/convert_librispeech.py \ --input_dir /datasets/LibriSpeech/dev-clean \ --dest_dir /datasets/LibriSpeech/dev-clean-wav \ --output_json /datasets/LibriSpeech/librispeech-dev-clean-wav.json python ./utils/convert_librispeech.py \ --input_dir /datasets/LibriSpeech/dev-other \ --dest_dir /datasets/LibriSpeech/dev-other-wav \ --output_json /datasets/LibriSpeech/librispeech-dev-other-wav.json python ./utils/convert_librispeech.py \ --input_dir /datasets/LibriSpeech/test-clean \ --dest_dir /datasets/LibriSpeech/test-clean-wav \ --output_json /datasets/LibriSpeech/librispeech-test-clean-wav.json python ./utils/convert_librispeech.py \ --input_dir /datasets/LibriSpeech/test-other \ --dest_dir /datasets/LibriSpeech/test-other-wav \ --output_json /datasets/LibriSpeech/librispeech-test-other-wav.json
TensorFlow/Segmentation/UNet_Medical/examples
examples
unet_1GPU
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script launches U-Net run in FP32 on 1 GPU and trains for 6400 iterations with batch_size 8. Usage: # bash unet_FP32_1GPU.sh <path to dataset> <path to results directory> horovodrun -np 1 python main.py --data_dir $1 --model_dir $2 --log_every 100 --max_steps 6400 --batch_size 8 --exec_mode train_and_evaluate --crossvalidation_idx 0 --augment --xla
PyTorch/DrugDiscovery/MoFlow/moflow/model
model
utils
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import Iterable import torch def initialize_module(module: torch.nn.Module, inputs: Iterable[torch.Tensor]) -> None: """Use given sample input to initialize the module. Module must implement method called `initialize` which takes list of input tensors """ assert hasattr(module, 'initialize') assert len(inputs) == 1, f'{len(inputs)} inputs' assert module.initialized.item() == 0, 'initialized' module.initialize(*inputs) assert module.initialized.item() == 1, 'not initialized' def initialize(model: torch.nn.Module, single_batch: Iterable[torch.Tensor]) -> None: """Initialize all sub-modules in the model given the sample input batch.""" hooks = [] for name, module in model.named_modules(): if hasattr(module, 'initialize'): logging.info(f'marking {name} for initialization') hook = module.register_forward_pre_hook(initialize_module) hooks.append(hook) _ = model(*single_batch) logging.info('all modules initialized, removing hooks') for hook in hooks: hook.remove()
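# ---------------------------------------------------------------------------
# Illustrative sketch (added for documentation, not part of the original file):
# the contract that `initialize` relies on. A sub-module exposes an `initialized`
# buffer and an `initialize(x)` method that derives data-dependent parameters
# from the first batch (here, a per-feature scale). `ScaleOnFirstBatch` is made
# up for this example and is not a MoFlow layer.
if __name__ == "__main__":

    class ScaleOnFirstBatch(torch.nn.Module):
        def __init__(self, num_features: int):
            super().__init__()
            self.scale = torch.nn.Parameter(torch.ones(num_features))
            self.register_buffer("initialized", torch.tensor(0, dtype=torch.uint8))

        def initialize(self, x: torch.Tensor) -> None:
            # Data-dependent initialization using statistics of the first batch.
            with torch.no_grad():
                self.scale.copy_(1.0 / (x.std(dim=0) + 1e-6))
            self.initialized.fill_(1)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return x * self.scale

    model = torch.nn.Sequential(ScaleOnFirstBatch(4), torch.nn.Linear(4, 2))
    batch = (torch.randn(8, 4),)
    # Hooks every sub-module that defines `initialize`, runs one forward pass,
    # then removes the hooks again.
    initialize(model, batch)
    print(model[0].initialized.item())  # -> 1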
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules
modules
__init__
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """isort:skip_file""" from .fairseq_dropout import FairseqDropout from .fp32_group_norm import Fp32GroupNorm, Fp32MaskedGroupNorm, MaskedGroupNorm from .gelu import gelu, gelu_accurate from .grad_multiply import GradMultiply from .gumbel_vector_quantizer import GumbelVectorQuantizer from .layer_norm import Fp32LayerNorm, LayerNorm from .multihead_attention import MultiheadAttention from .same_pad import SamePad from .transpose_last import TransposeLast __all__ = [ "Fp32GroupNorm", "Fp32LayerNorm", "Fp32MaskedGroupNorm", "MaskedGroupNorm", "gelu", "gelu_accurate", "GradMultiply", "GumbelVectorQuantizer", "LayerNorm", "MultiheadAttention", "SamePad", "TransposeLast", ]
TensorFlow/Detection/SSD/models/research/slim/nets
nets
inception_v2
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the definition for inception v2 classification network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nets import inception_utils slim = tf.contrib.slim trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev) def inception_v2_base(inputs, final_endpoint='Mixed_5c', min_depth=16, depth_multiplier=1.0, use_separable_conv=True, data_format='NHWC', scope=None): """Inception v2 (6a2). Constructs an Inception v2 network from inputs to the given final endpoint. This method can construct the network up to the layer inception(5b) as described in http://arxiv.org/abs/1502.03167. Args: inputs: a tensor of shape [batch_size, height, width, channels]. final_endpoint: specifies the endpoint to construct the network up to. It can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c']. min_depth: Minimum depth value (number of channels) for all convolution ops. Enforced when depth_multiplier < 1, and not an active constraint when depth_multiplier >= 1. depth_multiplier: Float multiplier for the depth (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. use_separable_conv: Use a separable convolution for the first layer Conv2d_1a_7x7. If this is False, use a normal convolution instead. data_format: Data format of the activations ('NHWC' or 'NCHW'). scope: Optional variable_scope. Returns: tensor_out: output tensor corresponding to the final_endpoint. end_points: a set of activations for external use, for example summaries or losses. Raises: ValueError: if final_endpoint is not set to one of the predefined values, or depth_multiplier <= 0 """ # end_points will collect relevant activations for external use, for example # summaries or losses. end_points = {} # Used to find thinned depths for each layer. if depth_multiplier <= 0: raise ValueError('depth_multiplier is not greater than zero.') depth = lambda d: max(int(d * depth_multiplier), min_depth) if data_format != 'NHWC' and data_format != 'NCHW': raise ValueError('data_format must be either NHWC or NCHW.') if data_format == 'NCHW' and use_separable_conv: raise ValueError( 'separable convolution only supports NHWC layout. NCHW data format can' ' only be used when use_separable_conv is False.' 
) concat_dim = 3 if data_format == 'NHWC' else 1 with tf.variable_scope(scope, 'InceptionV2', [inputs]): with slim.arg_scope( [slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME', data_format=data_format): # Note that sizes in the comments below assume an input spatial size of # 224x224, however, the inputs can be of any size greater 32x32. # 224 x 224 x 3 end_point = 'Conv2d_1a_7x7' if use_separable_conv: # depthwise_multiplier here is different from depth_multiplier. # depthwise_multiplier determines the output channels of the initial # depthwise conv (see docs for tf.nn.separable_conv2d), while # depth_multiplier controls the # channels of the subsequent 1x1 # convolution. Must have # in_channels * depthwise_multipler <= out_channels # so that the separable convolution is not overparameterized. depthwise_multiplier = min(int(depth(64) / 3), 8) net = slim.separable_conv2d( inputs, depth(64), [7, 7], depth_multiplier=depthwise_multiplier, stride=2, padding='SAME', weights_initializer=trunc_normal(1.0), scope=end_point) else: # Use a normal convolution instead of a separable convolution. net = slim.conv2d( inputs, depth(64), [7, 7], stride=2, weights_initializer=trunc_normal(1.0), scope=end_point) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 112 x 112 x 64 end_point = 'MaxPool_2a_3x3' net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 56 x 56 x 64 end_point = 'Conv2d_2b_1x1' net = slim.conv2d(net, depth(64), [1, 1], scope=end_point, weights_initializer=trunc_normal(0.1)) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 56 x 56 x 64 end_point = 'Conv2d_2c_3x3' net = slim.conv2d(net, depth(192), [3, 3], scope=end_point) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 56 x 56 x 192 end_point = 'MaxPool_3a_3x3' net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 28 x 28 x 192 # Inception module. 
end_point = 'Mixed_3b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(32), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 28 x 28 x 256 end_point = 'Mixed_3c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(64), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 28 x 28 x 320 end_point = 'Mixed_4a' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d( branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d( branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d( net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net = tf.concat(axis=concat_dim, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 end_point = 'Mixed_4b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(64), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d( branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), 
scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 end_point = 'Mixed_4c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(96), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 end_point = 'Mixed_4d' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(160), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 end_point = 'Mixed_4e' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = 
slim.conv2d( branch_3, depth(96), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 14 x 14 x 576 end_point = 'Mixed_5a' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d( net, depth(128), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3') branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.max_pool2d(net, [3, 3], stride=2, scope='MaxPool_1a_3x3') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 7 x 7 x 1024 end_point = 'Mixed_5b' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(160), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points # 7 x 7 x 1024 end_point = 'Mixed_5c' with tf.variable_scope(end_point): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3') with tf.variable_scope('Branch_2'): branch_2 = slim.conv2d( net, depth(192), [1, 1], weights_initializer=trunc_normal(0.09), scope='Conv2d_0a_1x1') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3') branch_2 = slim.conv2d(branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3') with tf.variable_scope('Branch_3'): branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3') branch_3 = slim.conv2d( branch_3, depth(128), [1, 1], weights_initializer=trunc_normal(0.1), scope='Conv2d_0b_1x1') net = tf.concat( axis=concat_dim, values=[branch_0, branch_1, branch_2, branch_3]) end_points[end_point] = net if end_point == final_endpoint: return net, end_points raise ValueError('Unknown final endpoint %s' % final_endpoint) def inception_v2(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, min_depth=16, depth_multiplier=1.0, prediction_fn=slim.softmax, 
spatial_squeeze=True, reuse=None, scope='InceptionV2', global_pool=False): """Inception v2 model for classification. Constructs an Inception v2 network for classification as described in http://arxiv.org/abs/1502.03167. The default image size used to train this network is 224x224. Args: inputs: a tensor of shape [batch_size, height, width, channels]. num_classes: number of predicted classes. If 0 or None, the logits layer is omitted and the input features to the logits layer (before dropout) are returned instead. is_training: whether is training or not. dropout_keep_prob: the percentage of activation values that are retained. min_depth: Minimum depth value (number of channels) for all convolution ops. Enforced when depth_multiplier < 1, and not an active constraint when depth_multiplier >= 1. depth_multiplier: Float multiplier for the depth (number of channels) for all convolution ops. The value must be greater than zero. Typical usage will be to set this value in (0, 1) to reduce the number of parameters or computation cost of the model. prediction_fn: a function to get predictions out of logits. spatial_squeeze: if True, logits is of shape [B, C], if false logits is of shape [B, 1, 1, C], where B is batch_size and C is number of classes. reuse: whether or not the network and its variables should be reused. To be able to reuse 'scope' must be given. scope: Optional variable_scope. global_pool: Optional boolean flag to control the avgpooling before the logits layer. If false or unset, pooling is done with a fixed window that reduces default-sized inputs to 1x1, while larger inputs lead to larger outputs. If true, any input size is pooled down to 1x1. Returns: net: a Tensor with the logits (pre-softmax activations) if num_classes is a non-zero integer, or the non-dropped-out input to the logits layer if num_classes is 0 or None. end_points: a dictionary from components of the network to the corresponding activation. Raises: ValueError: if final_endpoint is not set to one of the predefined values, or depth_multiplier <= 0 """ if depth_multiplier <= 0: raise ValueError('depth_multiplier is not greater than zero.') # Final pooling and prediction with tf.variable_scope(scope, 'InceptionV2', [inputs], reuse=reuse) as scope: with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training): net, end_points = inception_v2_base( inputs, scope=scope, min_depth=min_depth, depth_multiplier=depth_multiplier) with tf.variable_scope('Logits'): if global_pool: # Global average pooling. net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool') end_points['global_pool'] = net else: # Pooling with a fixed kernel size. kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7]) net = slim.avg_pool2d(net, kernel_size, padding='VALID', scope='AvgPool_1a_{}x{}'.format(*kernel_size)) end_points['AvgPool_1a'] = net if not num_classes: return net, end_points # 1 x 1 x 1024 net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b') logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_1c_1x1') if spatial_squeeze: logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze') end_points['Logits'] = logits end_points['Predictions'] = prediction_fn(logits, scope='Predictions') return logits, end_points inception_v2.default_image_size = 224 def _reduced_kernel_size_for_small_input(input_tensor, kernel_size): """Define kernel size which is automatically reduced for small input. 
  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
                         tf.minimum(shape[2], kernel_size[1])])
  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out


inception_v2_arg_scope = inception_utils.inception_arg_scope
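A minimal usage sketch for the model defined above, assuming the TF1.x slim environment this file targets and that the module is importable as nets.inception_v2 (the usual slim model-zoo layout; adjust the import to your checkout):

import tensorflow as tf
from nets import inception_v2  # assumed import path, not defined in this file

slim = tf.contrib.slim

# Batch of default-sized inputs; num_classes=1001 matches the slim ImageNet labels.
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(inception_v2.inception_v2_arg_scope()):
  logits, end_points = inception_v2.inception_v2(
      images, num_classes=1001, is_training=False)
# end_points maps endpoint names such as 'Mixed_3b' or 'Predictions' to tensors.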
TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading
dataloading
transcode
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser
import os
from collections import defaultdict

import numpy as np
import pandas as pd

from .feature_spec import FeatureSpec, get_categorical_feature_type
from .defaults import CATEGORICAL_CHANNEL, NUMERICAL_CHANNEL, LABEL_CHANNEL, CARDINALITY_SELECTOR


def parse_args():
    parser = ArgumentParser()
    parser.add_argument('--input', type=str, default='',
                        help='Path to input data directory')
    parser.add_argument('--feature_spec_in', type=str, default='feature_spec.yaml',
                        help='Name of the input feature specification file')
    parser.add_argument('--output', type=str, default='/data',
                        help='Path to output data directory')
    parser.add_argument('--feature_spec_out', type=str, default='feature_spec.yaml',
                        help='Name of the output feature specification file')
    parser.add_argument('--chunk_size', type=int, default=65536)
    return parser.parse_args()


def main():
    args = parse_args()
    args_output = args.output
    args_input = args.input
    args_feature_spec_in = args.feature_spec_in
    args_feature_spec_out = args.feature_spec_out
    batch_size = args.chunk_size

    fspec_in_path = os.path.join(args_input, args_feature_spec_in)
    fspec_in = FeatureSpec.from_yaml(fspec_in_path)

    input_label_feature_name = fspec_in.channel_spec[LABEL_CHANNEL][0]
    input_numerical_features_list = fspec_in.channel_spec[NUMERICAL_CHANNEL]
    input_categorical_features_list = fspec_in.channel_spec[CATEGORICAL_CHANNEL]

    # Do a pass to establish the cardinalities: they influence the type we save the dataset as
    found_cardinalities = defaultdict(lambda: 0)
    for mapping_name, mapping in fspec_in.source_spec.items():
        df_iterators = []
        for chunk in mapping:
            assert chunk['type'] == 'csv', "Only csv files supported in this transcoder"
            assert len(chunk['files']) == 1, "Only one file per chunk supported in this transcoder"
            path_to_load = os.path.join(fspec_in.base_directory, chunk['files'][0])
            chunk_iterator = pd.read_csv(path_to_load, header=None, chunksize=batch_size,
                                         names=chunk['features'])
            df_iterators.append(chunk_iterator)

        zipped = zip(*df_iterators)
        for chunks in zipped:
            mapping_df = pd.concat(chunks, axis=1)
            for feature in input_categorical_features_list:
                mapping_cardinality = mapping_df[feature].max() + 1
                previous_cardinality = found_cardinalities[feature]
                found_cardinalities[feature] = max(previous_cardinality, mapping_cardinality)

    for feature in input_categorical_features_list:
        declared_cardinality = fspec_in.feature_spec[feature][CARDINALITY_SELECTOR]
        if declared_cardinality == 'auto':
            pass
        else:
            assert int(declared_cardinality) >= found_cardinalities[feature]
            found_cardinalities[feature] = int(declared_cardinality)

    categorical_cardinalities = [found_cardinalities[f] for f in input_categorical_features_list]
    number_of_numerical_features = fspec_in.get_number_of_numerical_features()

    fspec_out = FeatureSpec.get_default_feature_spec(number_of_numerical_features=number_of_numerical_features,
                                                     categorical_feature_cardinalities=categorical_cardinalities)
    fspec_out.base_directory = args.output

    for mapping_name, mapping in fspec_in.source_spec.items():
        # open files for outputting
        label_path, numerical_path, categorical_paths = fspec_out.get_mapping_paths(mapping_name)
        for path in [label_path, numerical_path, *categorical_paths.values()]:
            os.makedirs(os.path.dirname(path), exist_ok=True)
        output_categorical_features_list = fspec_out.get_categorical_feature_names()
        numerical_f = open(numerical_path, "ab+")
        label_f = open(label_path, "ab+")
        categorical_fs = [open(categorical_paths[name], "ab+") for name in output_categorical_features_list]
        categorical_feature_types = [get_categorical_feature_type(card)
                                     for card in categorical_cardinalities]

        df_iterators = []
        for chunk in mapping:
            # We checked earlier it's a single file chunk
            path_to_load = os.path.join(fspec_in.base_directory, chunk['files'][0])
            chunk_iterator = pd.read_csv(path_to_load, header=None, chunksize=batch_size,
                                         names=chunk['features'])
            df_iterators.append(chunk_iterator)

        zipped = zip(*df_iterators)
        for chunks in zipped:
            mapping_df = pd.concat(chunks, axis=1)  # This takes care of making sure feature names are unique

            # Choose the right columns
            numerical_df = mapping_df[input_numerical_features_list]
            categorical_df = mapping_df[input_categorical_features_list]
            label_df = mapping_df[[input_label_feature_name]]

            # Append them to the binary files
            numerical_f.write(numerical_df.values.astype(np.float16).tobytes())
            label_f.write(label_df.values.astype(bool).tobytes())
            categorical_arr = categorical_df.values
            for cat_idx, cat_feature_type in enumerate(categorical_feature_types):
                categorical_fs[cat_idx].write(
                    categorical_arr[:, cat_idx].astype(cat_feature_type).tobytes())

    feature_spec_save_path = os.path.join(args_output, args_feature_spec_out)
    fspec_out.to_yaml(output_path=feature_spec_save_path)


if __name__ == '__main__':
    main()
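To make the cardinality pass above concrete, here is a small self-contained sketch that runs on an in-memory CSV instead of a FeatureSpec-described dataset; the column names and values are made up for illustration:

# Chunked cardinality estimation: largest index seen so far + 1, maximized across chunks.
import io
from collections import defaultdict

import pandas as pd

csv_data = io.StringIO("0,3,7\n1,5,2\n0,4,9\n1,1,0\n")
cat_columns = ['cat_0', 'cat_1']
found = defaultdict(lambda: 0)
for chunk in pd.read_csv(csv_data, header=None, chunksize=2,
                         names=['label', 'cat_0', 'cat_1']):
    for feature in cat_columns:
        found[feature] = max(found[feature], int(chunk[feature].max()) + 1)

print(dict(found))  # {'cat_0': 6, 'cat_1': 10}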
PyTorch/SpeechSynthesis/FastPitch/hifigan
hifigan
logging
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time from collections import OrderedDict from copy import copy from pathlib import Path import dllogger import numpy as np import torch.distributed as dist import torch from dllogger import StdOutBackend, JSONStreamBackend, Verbosity from common import tb_dllogger from common.tb_dllogger import (stdout_metric_format, stdout_step_format, unique_log_fpath, TBLogger) def init_logger(output_dir, log_file, ema_decay=0.0): local_rank = 0 if not dist.is_initialized() else dist.get_rank() print('logger init', local_rank) if local_rank == 0: Path(output_dir).mkdir(parents=False, exist_ok=True) log_fpath = log_file or Path(output_dir, 'nvlog.json') dllogger.init(backends=[ JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)), StdOutBackend(Verbosity.VERBOSE, step_format=stdout_step_format, metric_format=stdout_metric_format)]) init_train_metadata() else: dllogger.init(backends=[]) tb_train = ['train'] tb_val = ['val'] tb_ema = [k + '_ema' for k in tb_val] if ema_decay > 0.0 else [] tb_dllogger.tb_loggers = { s: TBLogger(enabled=(local_rank == 0), log_dir=output_dir, name=s) for s in tb_train + tb_val + tb_ema} def init_train_metadata(): dllogger.metadata("train_lrate_gen", {"name": "g lr", "unit": None, "format": ":>3.2e"}) dllogger.metadata("train_lrate_discrim", {"name": "d lr", "unit": None, "format": ":>3.2e"}) dllogger.metadata("train_avg_lrate_gen", {"name": "avg g lr", "unit": None, "format": ":>3.2e"}) dllogger.metadata("train_avg_lrate_discrim", {"name": "avg d lr", "unit": None, "format": ":>3.2e"}) for id_, pref in [('train', ''), ('train_avg', 'avg train '), ('val', ' avg val '), ('val_ema', ' EMA val ')]: dllogger.metadata(f"{id_}_loss_gen", {"name": f"{pref}g loss", "unit": None, "format": ":>6.3f"}) dllogger.metadata(f"{id_}_loss_discrim", {"name": f"{pref}d loss", "unit": None, "format": ":>6.3f"}) dllogger.metadata(f"{id_}_loss_mel", {"name": f"{pref}mel loss", "unit": None, "format": ":>6.3f"}) dllogger.metadata(f"{id_}_frames/s", {"name": None, "unit": "frames/s", "format": ":>8.2f"}) dllogger.metadata(f"{id_}_took", {"name": "took", "unit": "s", "format": ":>3.2f"}) def init_infer_metadata(): raise NotImplementedError # modalities = [('latency', 's', ':>10.5f'), ('RTF', 'x', ':>10.2f'), # ('frames/s', None, ':>10.2f'), ('samples/s', None, ':>10.2f'), # ('letters/s', None, ':>10.2f')] # for perc in ['', 'avg', '90%', '95%', '99%']: # for model in ['fastpitch', 'waveglow', '']: # for mod, unit, format in modalities: # name = f'{perc} {model} {mod}'.strip().replace(' ', ' ') # dllogger.metadata( # name.replace(' ', '_'), # {'name': f'{name: <26}', 'unit': unit, 'format': format}) class defaultdict(OrderedDict): """A simple, ordered defaultdict.""" def __init__(self, type_, *args, **kwargs): self.type_ = type_ super().__init__(*args, **kwargs) def __getitem__(self, key): if key not in self: self.__setitem__(key, self.type_()) return super().__getitem__(key) def 
__copy__(self): return defaultdict(self.type_, self) class Metrics(dict): def __init__(self, scopes=['train', 'train_avg'], dll_keys=['loss_gen', 'loss_discrim', 'loss_mel', 'frames/s', 'took', 'lrate_gen', 'lrate_discrim'], benchmark_epochs=0): super().__init__() self.dll_keys = dll_keys self.metrics = {scope: defaultdict(float) for scope in scopes} self.metric_counts = {scope: defaultdict(int) for scope in scopes} self.start_time = {scope: None for scope in scopes} self.benchmark_epochs = benchmark_epochs if benchmark_epochs > 0: self.metrics['train_benchmark'] = defaultdict(list) def __setitem__(self, key, val): extract = lambda t: t.item() if type(t) is torch.Tensor else t if type(val) is dict: for k, v in val.items(): super().__setitem__(k, extract(v)) else: super().__setitem__(key, extract(val)) def __getitem__(self, key): if key not in self: self.__setitem__(key, 0.0) return super().__getitem__(key) def start_accumulating(self, step, start_timer=True, scope='train'): del step # unused self.clear() self.metrics[scope].clear() self.metric_counts[scope].clear() if start_timer: self.start_time[scope] = time.time() def accumulate(self, scopes=['train', 'train_avg']): for scope in scopes: for k, v in self.items(): self.metrics[scope][k] += v self.metric_counts[scope][k] += 1 self.clear() def finish_accumulating(self, stop_timer=True, scope='train'): metr = self.metrics[scope] counts = self.metric_counts[scope] for k, v in metr.items(): metr[k] = v / counts[k] if stop_timer: took = time.time() - self.start_time[scope] if 'frames' in metr: metr['frames/s'] = metr.pop('frames') * counts['frames'] / took metr['took'] = took def start_iter(self, iter, start_timer=True): self.start_accumulating(iter, start_timer, 'train') def start_epoch(self, epoch, start_timer=True): self.start_accumulating(epoch, start_timer, 'train_avg') def start_val(self, start_timer=True): self.start_accumulating(None, start_timer, 'val') def finish_iter(self, stop_timer=True): self.finish_accumulating(stop_timer, 'train') def finish_epoch(self, stop_timer=True): self.finish_accumulating(stop_timer, 'train_avg') metr = self.metrics['train_benchmark'] for k in ('took', 'frames/s', 'loss_gen', 'loss_discrim', 'loss_mel'): metr[k].append(self.metrics['train_avg'][k]) if len(metr[k]) > self.benchmark_epochs: metr[k].pop(0) def finish_val(self, stop_timer=True): self.finish_accumulating(stop_timer, 'val') def get_metrics(self, scope='train', target='dll'): if scope == 'train_benchmark': metr = self.metrics[scope] ret = {'train_' + k: np.mean(v) for k, v in metr.items()} ret['benchmark_epochs_num'] = len(list(metr.values())[0]) return ret ret = copy(self.metrics[scope]) if scope == 'train': ret.update(self) if target == 'dll': ret = {f'{scope}_{k}': v for k, v in ret.items() if k in self.dll_keys} elif target == 'tb': # Rename keys so they would group nicely inside TensorBoard def split_key(k): pos = k.rfind('_') return k[:pos] + '/' + k[pos+1:] if pos >= 0 else k ret = {split_key(k): v for k, v in ret.items()} return ret
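As a rough illustration of the per-scope bookkeeping that Metrics.accumulate() and finish_accumulating() perform, here is a simplified standalone sketch (not the class itself) with made-up loss values:

from collections import defaultdict

# Sum each metric per step, count the contributions, then average at the end of the scope.
sums = defaultdict(float)
counts = defaultdict(int)
for step_metrics in [{'loss_gen': 2.0, 'loss_mel': 0.5},
                     {'loss_gen': 1.0, 'loss_mel': 0.25}]:
    for key, value in step_metrics.items():
        sums[key] += value
        counts[key] += 1

averages = {key: sums[key] / counts[key] for key in sums}
print(averages)  # {'loss_gen': 1.5, 'loss_mel': 0.375}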
PyTorch/LanguageModeling/BERT/triton/large/runner
runner
prepare_datasets
#!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

mkdir -p datasets/data/squad/v1.1

wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json -O datasets/data/squad/v1.1/train-v1.1.json
wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json -O datasets/data/squad/v1.1/dev-v1.1.json
wget https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/ -O datasets/data/squad/v1.1/evaluate-v1.1.py

if [[ ! -d "/workspace/bert/data/download/google_pretrained_weights" ]]
then
    python3 data/bertPrep.py --action download --dataset google_pretrained_weights
fi

ln -s /workspace/bert/data/download/google_pretrained_weights datasets/data/google_pretrained_weights
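An optional sanity check, not part of the original script: assuming the paths created above, it loads each SQuAD v1.1 split and counts the questions it contains.

import json

for split in ('train', 'dev'):
    path = 'datasets/data/squad/v1.1/{}-v1.1.json'.format(split)
    with open(path, encoding='utf-8') as f:
        data = json.load(f)['data']
    # SQuAD v1.1 layout: data -> articles -> paragraphs -> qas
    n_questions = sum(len(p['qas']) for article in data for p in article['paragraphs'])
    print(split, n_questions)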
TensorFlow2/Recommendation/WideAndDeep/triton/scripts/docker
docker
build
#!/usr/bin/env bash
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

docker build -t widendeep . -f triton/Dockerfile
PyTorch/Classification/GPUNet/triton/225ms-D/runner
runner
pipeline_impl
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ...runner.pipeline import Pipeline pipeline = Pipeline() pipeline.model_export( commands=( r""" if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then export FORMAT_SUFFIX="pt" else export FORMAT_SUFFIX="${EXPORT_FORMAT}" fi python3 triton/export_model.py \ --input-path triton/model.py \ --input-type pyt \ --output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-type ${EXPORT_FORMAT} \ --ignore-unknown-parameters \ --onnx-opset 13 \ --torch-jit ${TORCH_JIT} \ \ --config /workspace/gpunet/configs/batch1/GV100/2.25ms-D.json \ --checkpoint ${CHECKPOINT_DIR}/2.25ms-D.pth.tar \ --precision ${EXPORT_PRECISION} \ \ --dataloader triton/dataloader.py \ --val-path ${DATASETS_DIR}/ \ --is-prunet False \ --batch-size 1 """, ) ) pipeline.model_conversion( commands=( r""" if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then export FORMAT_SUFFIX="pt" else export FORMAT_SUFFIX="${EXPORT_FORMAT}" fi model-navigator convert \ --model-name ${MODEL_NAME} \ --model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-path ${SHARED_DIR}/converted_model \ --target-formats ${FORMAT} \ --target-precisions ${PRECISION} \ --launch-mode local \ --override-workspace \ --verbose \ \ --onnx-opsets 13 \ --max-batch-size ${MAX_BATCH_SIZE} \ --container-version 21.12 \ --max-workspace-size 10000000000 \ --atol OUTPUT__0=100 \ --rtol OUTPUT__0=100 """, ) ) pipeline.model_deploy( commands=( r""" model-navigator triton-config-model \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --model-version 1 \ --model-path ${SHARED_DIR}/converted_model \ --model-format ${FORMAT} \ --model-control-mode explicit \ --load-model \ --load-model-timeout-s 100 \ --verbose \ \ --backend-accelerator ${BACKEND_ACCELERATOR} \ --tensorrt-precision ${PRECISION} \ --tensorrt-capture-cuda-graph \ --tensorrt-max-workspace-size 10000000000 \ --max-batch-size ${MAX_BATCH_SIZE} \ --batching ${MODEL_BATCHING} \ --preferred-batch-sizes ${MAX_BATCH_SIZE} \ --engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES} """, ) ) pipeline.triton_performance_offline_tests( commands=( r""" python triton/run_performance_on_triton.py \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --input-data random \ --batch-sizes 1 2 4 8 16 32 64 \ --concurrency 1 \ --evaluation-mode offline \ --measurement-request-count 10 \ --warmup \ --performance-tool perf_analyzer \ --result-path ${SHARED_DIR}/triton_performance_offline.csv """, ), result_path="${SHARED_DIR}/triton_performance_offline.csv", ) pipeline.triton_performance_online_tests( commands=( r""" python triton/run_performance_on_triton.py \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --input-data random \ --batch-sizes 1 \ --concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 
184 192 200 208 216 224 232 240 248 256 \ --evaluation-mode online \ --measurement-request-count 500 \ --warmup \ --performance-tool perf_analyzer \ --result-path ${SHARED_DIR}/triton_performance_online.csv """, ), result_path="${SHARED_DIR}/triton_performance_online.csv", )
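The offline and online sweeps above are written out by hand; as a small side note, the same lists can be generated programmatically (plain Python, independent of the Pipeline API):

# Offline test: batch sizes 1, 2, 4, ..., 64; online test: concurrency 8, 16, ..., 256.
batch_size_sweep = [2 ** i for i in range(7)]
concurrency_sweep = list(range(8, 257, 8))
print(" ".join(str(b) for b in batch_size_sweep))
print(" ".join(str(c) for c in concurrency_sweep))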
TensorFlow/Detection/SSD/models/research/slim/preprocessing
preprocessing
lenet_preprocessing
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities for preprocessing."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim


def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
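A quick numeric check of the scaling applied by preprocess_image, done in NumPy so it runs outside TF1.x: subtracting 128 and dividing by 128 maps uint8 pixel values into roughly [-1, 1].

import numpy as np

pixels = np.array([0, 64, 128, 255], dtype=np.float32)
normalized = (pixels - 128.0) / 128.0
print(normalized)  # -1.0, -0.5, 0.0, 0.9921875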
TensorFlow/Detection/SSD/models/research/object_detection/models
models
ssd_mobilenet_v1_feature_extractor
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSDFeatureExtractor for MobilenetV1 features.""" import tensorflow as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import mobilenet_v1 slim = tf.contrib.slim class SSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV1 features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False): """MobileNetV1 Feature Extractor for SSD Models. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. reuse_weights: Whether to reuse variables. Default is None. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDMobileNetV1FeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. 
Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) feature_map_layout = { 'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', '', ''], 'layer_depth': [-1, -1, 512, 256, 256, 128], 'use_explicit_padding': self._use_explicit_padding, 'use_depthwise': self._use_depthwise, } with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=None, regularize_depthwise=True)): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v1.mobilenet_v1_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, use_explicit_padding=self._use_explicit_padding, scope=scope) with slim.arg_scope(self._conv_hyperparams_fn()): feature_maps = feature_map_generators.multi_resolution_feature_maps( feature_map_layout=feature_map_layout, depth_multiplier=self._depth_multiplier, min_depth=self._min_depth, insert_1x1_conv=True, image_features=image_features) return feature_maps.values()
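A tiny plain-Python illustration (not the object_detection op itself) of what ops.pad_to_multiple aims for before feature extraction: rounding each spatial dimension up to the nearest multiple of pad_to_multiple.

def padded_dim(size, multiple):
    # Round size up to the nearest multiple (e.g. 300 -> 320 for multiple=32).
    return ((size + multiple - 1) // multiple) * multiple

print(padded_dim(300, 32))  # 320
print(padded_dim(320, 32))  # 320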
PyTorch/LanguageModeling/BERT/distillation
distillation
task_distill
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team and Huawei Noah's Ark Lab. # Copyright (c) 2018-2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT Distillation finetuning runner.""" from __future__ import absolute_import, division, print_function import argparse import csv import logging import os import random import sys import time, datetime import math import numpy as np import torch from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset) from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm, trange from torch.nn import CrossEntropyLoss, MSELoss from scipy.stats import pearsonr, spearmanr from sklearn.metrics import matthews_corrcoef, f1_score sys.path.append('/workspace/bert/') from modeling import BertForSequenceClassification, BertForQuestionAnswering, Project, WEIGHTS_NAME, CONFIG_NAME from schedulers import LinearWarmUpScheduler, ConstantLR from tokenization_utils import BertTokenizer from apex.optimizers import FusedAdam from hooks import * from losses import * from utils.utils import is_main_process, get_rank, get_world_size, unwrap_ddp, set_seed from utils.squad.squad_metrics import compute_predictions, squad_evaluate import utils.squad.squad_metrics as squad_metrics from utils.squad.squad_utils import squad_convert_examples_to_features, SquadResult, RawResult, SquadV1Processor, SquadV2Processor, load_and_cache_examples from itertools import chain csv.field_size_limit(sys.maxsize) log_format = '%(asctime)s %(message)s' logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p') fh = logging.FileHandler('debug_layer_loss.log') fh.setFormatter(logging.Formatter(log_format)) logging.getLogger().addHandler(fh) logger = logging.getLogger() oncloud = True try: import moxing as mox except: oncloud = False class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. 
""" self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, seq_length=None): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.seq_length = seq_length self.label_id = label_id class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with open(input_file, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: if sys.version_info[0] == 2: line = list(unicode(cell, 'utf-8') for cell in line) lines.append(line) return lines class MrpcProcessor(DataProcessor): """Processor for the MRPC data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[3] text_b = line[4] label = line[0] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliProcessor(DataProcessor): """Processor for the MultiNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[8] text_b = line[9] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class MnliMismatchedProcessor(MnliProcessor): """Processor for the MultiNLI Mismatched data set (GLUE version).""" def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_matched") class ColaProcessor(DataProcessor): """Processor for the CoLA data set (GLUE version).""" def get_train_examples(self, data_dir): """See base 
class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text_a = line[3] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class Sst2Processor(DataProcessor): """Processor for the SST-2 data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" print("Using train data") return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): print("Using Aug Data") return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, i) text_a = line[0] label = line[1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class StsbProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return [None] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[7] text_b = line[8] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QqpProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) try: text_a = line[3] text_b = line[4] label = line[5] except IndexError: continue examples.append( 
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class QnliProcessor(DataProcessor): """Processor for the STS-B data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class RteProcessor(DataProcessor): """Processor for the RTE data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_aug_examples(self, data_dir): return self._create_examples( self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") def get_labels(self): """See base class.""" return ["entailment", "not_entailment"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples class WnliProcessor(DataProcessor): """Processor for the WNLI data set (GLUE version).""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") def get_labels(self): """See base class.""" return ["0", "1"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[1] text_b = line[2] label = line[-1] examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): """Loads a data file into a list of `InputBatch`s.""" label_map = {label: i for i, label in enumerate(label_list)} features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[:(max_seq_length - 2)] tokens = ["[CLS]"] + tokens_a + ["[SEP]"] segment_ids = [0] * len(tokens) if 
tokens_b: tokens += tokens_b + ["[SEP]"] segment_ids += [1] * (len(tokens_b) + 1) input_ids = tokenizer.convert_tokens_to_ids(tokens) input_mask = [1] * len(input_ids) seq_length = len(input_ids) padding = [0] * (max_seq_length - len(input_ids)) input_ids += padding input_mask += padding segment_ids += padding assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length if output_mode == "classification": label_id = label_map[example.label] elif output_mode == "regression": label_id = float(example.label) else: raise KeyError(output_mode) if ex_index < 1: logger.info("*** Example ***") logger.info("guid: %s" % (example.guid)) logger.info("tokens: %s" % " ".join( [str(x) for x in tokens])) logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) logger.info( "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) logger.info("label: {}".format(example.label)) logger.info("label_id: {}".format(label_id)) features.append( InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, seq_length=seq_length)) return features def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def simple_accuracy(preds, labels): return (preds == labels).mean() def acc_and_f1(preds, labels): acc = simple_accuracy(preds, labels) f1 = f1_score(y_true=labels, y_pred=preds) return { "acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2, } def pearson_and_spearman(preds, labels): pearson_corr = pearsonr(preds, labels)[0] spearman_corr = spearmanr(preds, labels)[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def compute_metrics(task_name, preds, labels): assert len(preds) == len(labels) if task_name == "cola": return {"mcc": matthews_corrcoef(labels, preds)} elif task_name == "sst-2": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mrpc": return acc_and_f1(preds, labels) elif task_name == "sts-b": return pearson_and_spearman(preds, labels) elif task_name == "qqp": return acc_and_f1(preds, labels) elif task_name == "mnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "mnli-mm": return {"acc": simple_accuracy(preds, labels)} elif task_name == "qnli": return {"acc": simple_accuracy(preds, labels)} elif task_name == "rte": return {"acc": simple_accuracy(preds, labels)} elif task_name == "wnli": return {"acc": simple_accuracy(preds, labels)} else: raise KeyError(task_name) def get_tensor_data(output_mode, features): if output_mode == "classification": all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long) elif output_mode == "regression": all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float) all_seq_lengths = torch.tensor([f.seq_length for f in features], dtype=torch.long) all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long) tensor_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_seq_lengths) return tensor_data, all_label_ids def 
result_to_file(result, file_name, step, train_summary_writer): with open(file_name, "a") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" {} = {}".format(key, str(result[key]))) writer.write("%s = %s\n" % (key, str(result[key]))) train_summary_writer.add_scalar(key, result[key], step) def do_eval(model, task_name, eval_dataloader, device, output_mode, eval_labels, num_labels, amp): eval_loss = 0 nb_eval_steps = 0 preds = [] for batch_ in tqdm(eval_dataloader, desc="Evaluating"): batch_ = tuple(t.to(device) for t in batch_) with torch.no_grad(): input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch_ with torch.cuda.amp.autocast(enabled=amp): logits = model(input_ids, segment_ids, input_mask) # create eval loss and other metric required by the task if output_mode == "classification": loss_fct = CrossEntropyLoss() tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1)) elif output_mode == "regression": loss_fct = MSELoss() tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1)) eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if len(preds) == 0: preds.append(logits.detach().cpu().numpy()) else: preds[0] = np.append( preds[0], logits.detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps preds = preds[0] if output_mode == "classification": preds = np.argmax(preds, axis=1) elif output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(task_name, preds, eval_labels.numpy()) result['eval_loss'] = eval_loss return result def main(): parser = argparse.ArgumentParser() parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--teacher_model", default=None, type=str, help="The teacher model dir.") parser.add_argument("--student_model", default=None, type=str, required=True, help="The student model dir.") parser.add_argument("--task_name", default=None, type=str, required=True, help="The name of the task to train.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument("--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3") parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
\n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument('--amp', action='store_true', default=False, help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=32, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument('--weight_decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay') parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--max_steps", default=-1, type=int, help="Total number of training steps to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument('--max_grad_norm', type=float, default=1., help="Gradient Clipping threshold") # added arguments parser.add_argument('--aug_train', action='store_true') parser.add_argument('--value_state_loss', action='store_true', default=False) parser.add_argument('--hidden_state_loss', action='store_true', default=False) parser.add_argument('--use_last_layer', action='store_true', default=False) parser.add_argument('--use_kld', action='store_true', default=False) parser.add_argument('--use_cosine', action='store_true', default=False) parser.add_argument('--average_loss', action='store_true', default=False) parser.add_argument('--eval_step', type=int, default=50) parser.add_argument('--pred_distill', action='store_true') parser.add_argument('--data_url', type=str, default="") parser.add_argument('--temperature', type=float, default=1.) #SQUAD args parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.", ) parser.add_argument( "--verbose_logging", action="store_true", help="If true, all of the warnings related to data processing will be printed. 
" "A number of warnings are expected for a normal SQuAD evaluation.", ) parser.add_argument( "--lang_id", default=0, type=int, help="language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)", ) parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--distill_config', default="distillation_config.json", type=str, help="path the distillation config") args = parser.parse_args() if os.path.exists(args.output_dir) and os.listdir(args.output_dir): raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir)) if not os.path.exists(args.output_dir) and is_main_process(): os.makedirs(args.output_dir, exist_ok=True) # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.device = device logger.info('device: {}'.format(device)) logger.info('The args: {}'.format(args)) processors = { "cola": ColaProcessor, "mnli": MnliProcessor, "mnli-mm": MnliMismatchedProcessor, "mrpc": MrpcProcessor, "sst-2": Sst2Processor, "sts-b": StsbProcessor, "qqp": QqpProcessor, "qnli": QnliProcessor, "rte": RteProcessor, "wnli": WnliProcessor, "squadv1.1": SquadV1Processor, } output_modes = { "cola": "classification", "mnli": "classification", "mrpc": "classification", "sst-2": "classification", "sts-b": "regression", "qqp": "classification", "qnli": "classification", "rte": "classification", "wnli": "classification", "squadv1.1": "classification", } # intermediate distillation default parameters default_params = { "cola": {"num_train_epochs": 50, "max_seq_length": 64}, "mnli": {"num_train_epochs": 5, "max_seq_length": 128}, "mrpc": {"num_train_epochs": 20, "max_seq_length": 128}, "sst-2": {"num_train_epochs": 10, "max_seq_length": 64}, "sts-b": {"num_train_epochs": 20, "max_seq_length": 128}, "qqp": {"num_train_epochs": 5, "max_seq_length": 128}, "qnli": {"num_train_epochs": 10, "max_seq_length": 128}, "rte": {"num_train_epochs": 20, "max_seq_length": 128}, "squadv1.1": {"num_train_epochs": 6, "max_seq_length": 384}, } acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte", "squadv1.1"] corr_tasks = ["sts-b"] mcc_tasks = ["cola"] # Prepare devices #device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger.info("device: {} n_gpu: {}".format(device, n_gpu)) set_seed(args.seed, n_gpu) # Prepare task settings # Set up tensorboard current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") train_log_dir = os.path.join(args.output_dir, current_time, 'train_' + str(get_rank()) + '_of_' + str(get_world_size())) train_summary_writer = SummaryWriter(train_log_dir) task_name = args.task_name.lower() if task_name in default_params: args.max_seq_len = default_params[task_name]["max_seq_length"] if task_name not in processors: raise ValueError("Task not found: %s" % task_name) processor = processors[task_name]() output_mode = output_modes[task_name] label_list = processor.get_labels() if task_name != "squadv1.1" else [] 
num_labels = len(label_list) tokenizer = BertTokenizer.from_pretrained(args.student_model, do_lower_case=args.do_lower_case) if not args.do_eval: if task_name == "squadv1.1": train_data = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) else: if not args.aug_train: train_examples = processor.get_train_examples(args.data_dir) else: train_examples = processor.get_aug_examples(args.data_dir) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) if task_name != "squadv1.1": num_train_optimization_steps = int(len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer, output_mode) train_data, _ = get_tensor_data(output_mode, train_features) if args.local_rank == -1: train_sampler = RandomSampler(train_data) else: train_sampler = torch.utils.data.distributed.DistributedSampler(train_data) train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=4, pin_memory=True) if args.max_steps > 0: num_train_optimization_steps = args.max_steps else: num_train_optimization_steps = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs if "SQuAD" not in args.task_name: eval_examples = processor.get_dev_examples(args.data_dir) eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length, tokenizer, output_mode) eval_data, eval_labels = get_tensor_data(output_mode, eval_features) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4) if not args.do_eval: if "SQuAD" not in args.task_name: teacher_model, teacher_config = BertForSequenceClassification.from_pretrained(args.teacher_model, distill_config=args.distill_config, num_labels=num_labels, pooler=True) else: teacher_model, teacher_config = BertForQuestionAnswering.from_pretrained(args.teacher_model, distill_config=args.distill_config, pooler=False) if "SQuAD" not in args.task_name: student_model, student_config = BertForSequenceClassification.from_pretrained(args.student_model, distill_config=args.distill_config, num_labels=num_labels, pooler=True) else: student_model, student_config = BertForQuestionAnswering.from_pretrained(args.student_model, distill_config=args.distill_config, pooler=False) # We need a projection layer since teacher.hidden_size != student.hidden_size if not args.do_eval: use_projection = student_config.hidden_size != teacher_config.hidden_size if student_config.distillation_config["use_hidden_states"] and use_projection: project = Project(student_config, teacher_config) project_model_file = os.path.join(args.student_model, "project.bin") project_ckpt = torch.load(project_model_file, map_location="cpu") project.load_state_dict(project_ckpt) else: use_projection = False distill_config = {"nn_module_names": []} #Empty list since we don't want to use nn module hooks here distill_hooks_student, distill_hooks_teacher = DistillHooks(distill_config), DistillHooks(distill_config) student_model.register_forward_hook(distill_hooks_student.child_to_main_hook) if not args.do_eval: teacher_model.register_forward_hook(distill_hooks_teacher.child_to_main_hook) if not args.do_eval: teacher_model.to(device) student_model.to(device) if 
student_config.distillation_config["use_hidden_states"] and use_projection: project.to(device) if args.local_rank != -1: if not args.do_eval: teacher_model = torch.nn.parallel.DistributedDataParallel( teacher_model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False ) student_model = torch.nn.parallel.DistributedDataParallel( student_model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False ) if student_config.distillation_config["use_hidden_states"] and use_projection: project = torch.nn.parallel.DistributedDataParallel( project, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=False ) if args.do_eval: global_step = 0 num_examples = 0 eval_start = time.time() logger.info("***** Running evaluation *****") student_model.eval() if "SQuAD" not in args.task_name: logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) result = do_eval(student_model, task_name, eval_dataloader, device, output_mode, eval_labels, num_labels, args.amp) num_examples = len(eval_examples) else: dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) logger.info(" Num examples = %d", len(features)) logger.info(" Batch size = %d", args.eval_batch_size) result = squad_metrics.evaluate(args, student_model, dataset, examples, features, prefix="final")#global_step) result["global_step"] = global_step num_examples = len(features) eval_end = time.time() logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) logger.info("time for inference {} perf {}".format(eval_end - eval_start, num_examples * 100 / (eval_end - eval_start))) else: scaler = torch.cuda.amp.GradScaler() logger.info("***** Running training *****") logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", num_train_optimization_steps) # Prepare optimizer param_optimizer = list(student_model.named_parameters()) if student_config.distillation_config["use_hidden_states"] and use_projection: param_optimizer += list(project.named_parameters()) size = 0 for n, p in student_model.named_parameters(): logger.info('n: {}'.format(n)) size += p.nelement() logger.info('Total parameters: {}'.format(size)) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0} ] optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False) schedule = 'warmup_linear' if not student_config.distillation_config["use_pred_states"]: scheduler = ConstantLR(optimizer) else: scheduler = LinearWarmUpScheduler(optimizer, warmup=args.warmup_proportion, total_steps=num_train_optimization_steps) transformer_losses = TransformerLosses(student_config, teacher_config, device, args) def soft_cross_entropy(predicts, targets): student_likelihood = torch.nn.functional.log_softmax(predicts, dim=-1) targets_prob = torch.nn.functional.softmax(targets, dim=-1) return (- targets_prob * student_likelihood).mean() # Train and evaluate global_step = 0 best_dev_acc = 0.0 output_eval_file = os.path.join(args.output_dir, "eval_results.txt") iter_start = time.time() for epoch_ in trange(int(args.num_train_epochs), desc="Epoch"): tr_loss = 0. 
tr_att_loss = 0. tr_rep_loss = 0. tr_value_loss = 0. tr_cls_loss = 0. student_model.train() nb_tr_examples, nb_tr_steps = 0, 0 for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration", ascii=True)): batch = tuple(t.to(device) for t in batch) if "squad" in task_name: input_ids, input_mask, segment_ids = batch[:3] label_ids = None else: input_ids, input_mask, segment_ids, label_ids, seq_lengths = batch if input_ids.size()[0] != args.train_batch_size: continue att_loss = 0. rep_loss = 0. value_loss = 0. cls_loss = 0. with torch.cuda.amp.autocast(enabled=args.amp): student_model(input_ids, segment_ids, input_mask) # Gather student states extracted by hooks temp_model = unwrap_ddp(student_model) student_atts = flatten_states(temp_model.distill_states_dict, "attention_scores") student_reps = flatten_states(temp_model.distill_states_dict, "hidden_states") student_values = flatten_states(temp_model.distill_states_dict, "value_states") student_embeddings = flatten_states(temp_model.distill_states_dict, "embedding_states") student_logits = flatten_states(temp_model.distill_states_dict, "pred_states") if student_config.distillation_config["use_attention_scores"]: bsz, attn_heads, seq_len, _ = student_atts[0].shape #No gradient for teacher training with torch.no_grad(): teacher_model(input_ids, segment_ids, input_mask) # Gather teacher states extracted by hooks temp_model = unwrap_ddp(teacher_model) teacher_atts = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "attention_scores")] teacher_reps = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "hidden_states")] teacher_values = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "value_states")] if student_config.distillation_config["use_pred_states"]: if "squad" in task_name: teacher_logits = [[i.detach() for i in flatten_states(temp_model.distill_states_dict, "pred_states")[0]]] else: teacher_logits = [flatten_states(temp_model.distill_states_dict, "pred_states")[0].detach()] teacher_embeddings = [i.detach() for i in flatten_states(temp_model.distill_states_dict, "embedding_states")] #MiniLM if student_config.distillation_config["student_teacher_layer_mapping"] == "last_layer": if student_config.distillation_config["use_attention_scores"]: student_atts = [student_atts[-1]] new_teacher_atts = [teacher_atts[-1]] if student_config.distillation_config["use_value_states"]: student_values = [student_values[-1]] new_teacher_values = [teacher_values[-1]] if student_config.distillation_config["use_hidden_states"]: new_teacher_reps = [teacher_reps[-1]] new_student_reps = [student_reps[-1]] else: if student_config.distillation_config["use_attention_scores"]: teacher_layer_num = len(teacher_atts) student_layer_num = len(student_atts) assert teacher_layer_num % student_layer_num == 0 layers_per_block = int(teacher_layer_num / student_layer_num) new_teacher_atts = [teacher_atts[i * layers_per_block + layers_per_block - 1] for i in range(student_layer_num)] if student_config.distillation_config["use_value_states"]: teacher_layer_num = len(teacher_values) student_layer_num = len(student_values) assert teacher_layer_num % student_layer_num == 0 layers_per_block = int(teacher_layer_num / student_layer_num) new_teacher_values = [teacher_values[i * layers_per_block + layers_per_block - 1] for i in range(student_layer_num)] if student_config.distillation_config["use_hidden_states"]: teacher_layer_num = len(teacher_reps) student_layer_num = len(student_reps) assert teacher_layer_num % student_layer_num 
== 0 layers_per_block = int(teacher_layer_num / student_layer_num) new_teacher_reps = [teacher_reps[i * layers_per_block + layers_per_block - 1] for i in range(student_layer_num)] new_student_reps = student_reps if student_config.distillation_config["use_attention_scores"]: att_loss = transformer_losses.compute_loss(student_atts, new_teacher_atts, loss_name="attention_loss") if student_config.distillation_config["use_hidden_states"]: if student_config.distillation_config["use_hidden_states"] and use_projection: rep_loss = transformer_losses.compute_loss(project(new_student_reps), new_teacher_reps, loss_name="hidden_state_loss") else: rep_loss = transformer_losses.compute_loss(new_student_reps, new_teacher_reps, loss_name="hidden_state_loss") if student_config.distillation_config["use_embedding_states"]: if student_config.distillation_config["use_hidden_states"] and use_projection: rep_loss += transformer_losses.compute_loss(project(student_embeddings), teacher_embeddings, loss_name="embedding_state_loss") else: rep_loss += transformer_losses.compute_loss(student_embeddings, teacher_embeddings, loss_name="embedding_state_loss") if student_config.distillation_config["use_value_states"]: value_loss = transformer_losses.compute_loss(student_values, new_teacher_values, loss_name="value_state_loss") if not args.average_loss: loss = rep_loss + att_loss + value_loss else: loss = (rep_loss / len(new_student_reps)) + (att_loss / len(student_atts)) + (value_loss / len(student_values)) if student_config.distillation_config["use_attention_scores"]: tr_att_loss += att_loss.item() if student_config.distillation_config["use_hidden_states"]: tr_rep_loss += rep_loss.item() if student_config.distillation_config["use_value_states"]: tr_value_loss += value_loss.item() #pred layer specific if student_config.distillation_config["use_pred_states"]: if output_mode == "classification": if "squad" in task_name: cls_loss = 0. 
# Iterate over start and end logits for index, student_logit in enumerate(student_logits[0]): cls_loss += soft_cross_entropy(student_logit / args.temperature, teacher_logits[0][index] / args.temperature) else: cls_loss = soft_cross_entropy(student_logits[0] / args.temperature, teacher_logits[0] / args.temperature) elif output_mode == "regression": loss_mse = MSELoss() cls_loss = loss_mse(student_logits[0].view(-1), label_ids.view(-1)) loss = cls_loss tr_cls_loss += cls_loss.item() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.amp: scaler.scale(loss).backward() scaler.unscale_(optimizer) else: loss.backward() if student_config.distillation_config["use_hidden_states"] and use_projection: torch.nn.utils.clip_grad_norm_(chain(student_model.parameters(), project.parameters()), args.max_grad_norm, error_if_nonfinite=False) else: torch.nn.utils.clip_grad_norm_(student_model.parameters(), args.max_grad_norm, error_if_nonfinite=False) tr_loss += loss.item() nb_tr_examples += input_ids.size(0) nb_tr_steps += 1 if (step + 1) % args.gradient_accumulation_steps == 0: scheduler.step() if args.amp: scaler.step(optimizer) scaler.update() else: optimizer.step() optimizer.zero_grad() global_step += 1 if (global_step + 1) % args.eval_step == 0 and is_main_process(): logger.info("***** Running evaluation *****") logger.info(" Epoch = {} iter {} step".format(epoch_, global_step)) if not "squad" in task_name: logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) student_model.eval() loss = tr_loss / (step + 1) cls_loss = tr_cls_loss / (step + 1) att_loss = tr_att_loss / (step + 1) rep_loss = tr_rep_loss / (step + 1) value_loss = tr_value_loss / (step + 1) result = {} if student_config.distillation_config["use_pred_states"]: if "SQuAD" not in args.task_name: result = do_eval(student_model, task_name, eval_dataloader, device, output_mode, eval_labels, num_labels, args.amp) else: dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) result = squad_metrics.evaluate(args, student_model, dataset, examples, features, prefix="final")#global_step) result['global_step'] = global_step result['lr'] = optimizer.param_groups[0]["lr"] result['cls_loss'] = cls_loss result['att_loss'] = att_loss result['rep_loss'] = rep_loss result['value_loss'] = value_loss result['loss'] = loss result['perf'] = (global_step + 1) * get_world_size() * args.train_batch_size * args.gradient_accumulation_steps / (time.time() - iter_start) if is_main_process(): result_to_file(result, output_eval_file, global_step, train_summary_writer) if not student_config.distillation_config["use_pred_states"]: save_model = True else: save_model = False if task_name in acc_tasks and result['acc'] > best_dev_acc: best_dev_acc = result['acc'] save_model = True if task_name in corr_tasks and result['corr'] > best_dev_acc: best_dev_acc = result['corr'] save_model = True if task_name in mcc_tasks and result['mcc'] > best_dev_acc: best_dev_acc = result['mcc'] save_model = True if save_model and is_main_process(): logger.info("***** Save model *****") model_to_save = student_model.module if hasattr(student_model, 'module') else student_model model_name = WEIGHTS_NAME output_model_file = os.path.join(args.output_dir, model_name) output_config_file = os.path.join(args.output_dir, CONFIG_NAME) torch.save(model_to_save.state_dict(), output_model_file) model_to_save.config.to_json_file(output_config_file) 
tokenizer.save_vocabulary(args.output_dir) # Test mnli-mm if student_config.distillation_config["use_pred_states"] and task_name == "mnli": task_name = "mnli-mm" processor = processors[task_name]() if not os.path.exists(args.output_dir + '-MM'): os.makedirs(args.output_dir + '-MM') eval_examples = processor.get_dev_examples(args.data_dir) eval_features = convert_examples_to_features( eval_examples, label_list, args.max_seq_length, tokenizer, output_mode) eval_data, eval_labels = get_tensor_data(output_mode, eval_features) logger.info("***** Running mm evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.eval_batch_size) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4) result = do_eval(student_model, task_name, eval_dataloader, device, output_mode, eval_labels, num_labels, args.amp) result['global_step'] = global_step tmp_output_eval_file = os.path.join(args.output_dir + '-MM', "eval_results.txt") result_to_file(result, tmp_output_eval_file, global_step, train_summary_writer) task_name = 'mnli' if oncloud: logging.info(mox.file.list_directory(args.output_dir, recursive=True)) logging.info(mox.file.list_directory('.', recursive=True)) mox.file.copy_parallel(args.output_dir, args.data_url) mox.file.copy_parallel('.', args.data_url) student_model.train() train_summary_writer.flush() train_summary_writer.close() if __name__ == "__main__": start_time = time.time() main() print("Total time taken:", time.time() - start_time)
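# Note (illustration only, not executed): in the intermediate-distillation branch above,
# student layers are matched against a uniformly strided subset of teacher layers.
# As a hedged, worked example, assuming a 12-layer teacher and a 4-layer student:
#     layers_per_block = 12 // 4 = 3
#     selected teacher layers = [i * 3 + 3 - 1 for i in range(4)] = [2, 5, 8, 11]
# i.e. student layer i is paired with teacher layer
# i * layers_per_block + layers_per_block - 1 (0-indexed), which is exactly the index
# used when building new_teacher_atts, new_teacher_values and new_teacher_reps.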
PyTorch/LanguageModeling/BERT/triton/dist4l
dist4l
README
# Deploying the BERT model on Triton Inference Server This folder contains instructions for deployment to run inference on Triton Inference Server as well as a detailed performance analysis. The purpose of this document is to help you with achieving the best inference performance. ## Table of contents - [Solution overview](#solution-overview) - [Introduction](#introduction) - [Deployment process](#deployment-process) - [Setup](#setup) - [Quick Start Guide](#quick-start-guide) - [Performance](#performance) - [Offline scenario](#offline-scenario) - [Offline: NVIDIA A30, ONNX Runtime with FP16](#offline-nvidia-a30-onnx-runtime-with-fp16) - [Offline: NVIDIA A30, ONNX Runtime with FP16, Backend accelerator TensorRT](#offline-nvidia-a30-onnx-runtime-with-fp16-backend-accelerator-tensorrt) - [Offline: NVIDIA A30, NVIDIA TensorRT with FP16](#offline-nvidia-a30-nvidia-tensorrt-with-fp16) - [Offline: NVIDIA A30, NVIDIA PyTorch with FP16](#offline-nvidia-a30-pytorch-with-fp16) - [Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16) - [Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16, Backend accelerator TensorRT](#offline-nvidia-dgx-1-1x-v100-32gb-onnx-runtime-with-fp16-backend-accelerator-tensorrt) - [Offline: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-nvidia-tensorrt-with-fp16) - [Offline: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16](#offline-nvidia-dgx-1-1x-v100-32gb-pytorch-with-fp16) - [Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16) - [Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16, Backend accelerator TensorRT](#offline-nvidia-dgx-a100-1x-a100-80gb-onnx-runtime-with-fp16-backend-accelerator-tensorrt) - [Offline: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-nvidia-tensorrt-with-fp16) - [Offline: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16](#offline-nvidia-dgx-a100-1x-a100-80gb-pytorch-with-fp16) - [Offline: NVIDIA T4, ONNX Runtime with FP16](#offline-nvidia-t4-onnx-runtime-with-fp16) - [Offline: NVIDIA T4, ONNX Runtime with FP16, Backend accelerator TensorRT](#offline-nvidia-t4-onnx-runtime-with-fp16-backend-accelerator-tensorrt) - [Offline: NVIDIA T4, NVIDIA TensorRT with FP16](#offline-nvidia-t4-nvidia-tensorrt-with-fp16) - [Offline: NVIDIA T4, PyTorch with FP16](#offline-nvidia-t4-pytorch-with-fp16) - [Advanced](#advanced) - [Prepare configuration](#prepare-configuration) - [Step by step deployment process](#step-by-step-deployment-process) - [Latency explanation](#latency-explanation) - [Release notes](#release-notes) - [Changelog](#changelog) - [Known issues](#known-issues) ## Solution overview ### Introduction The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server) provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or gRPC endpoint, allowing remote clients to request inferencing for any number of GPU or CPU models being managed by the server. This README provides step-by-step deployment instructions for models generated during training (as described in the [model README](../readme.md)). Additionally, this README provides the corresponding deployment scripts that ensure optimal GPU utilization during inferencing on Triton Inference Server. 
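As a minimal illustration of that client/server flow (not part of the provided deployment scripts), the sketch below shows how a Python client could query such an endpoint with the `tritonclient` package. It assumes the model has already been deployed under the hypothetical name `bert`, that Triton listens on the default HTTP port 8000, and that the tensor names follow the `input__*`/`output__*` convention used by the conversion commands later in this README; the inputs are zero-filled placeholders rather than real tokenized text.

```python
import numpy as np
import tritonclient.http as httpclient

# Connect to a locally running Triton Inference Server over HTTP.
client = httpclient.InferenceServerClient(url="localhost:8000")

batch_size, max_seq_length = 1, 384

# The exported QA model consumes three INT32 tensors:
# token ids, attention mask and token type ids (input__0, input__1, input__2).
inputs = []
for name in ("input__0", "input__1", "input__2"):
    tensor = httpclient.InferInput(name, [batch_size, max_seq_length], "INT32")
    tensor.set_data_from_numpy(np.zeros((batch_size, max_seq_length), dtype=np.int32))
    inputs.append(tensor)

outputs = [httpclient.InferRequestedOutput("output__0"),
           httpclient.InferRequestedOutput("output__1")]

# Send a single synchronous request and read back the start/end logits.
result = client.infer(model_name="bert", inputs=inputs, outputs=outputs)
start_logits = result.as_numpy("output__0")
end_logits = result.as_numpy("output__1")
print(start_logits.shape, end_logits.shape)
```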
### Deployment process

The deployment process consists of two steps:

1. Conversion. The purpose of conversion is to find the best performing model format supported by Triton Inference Server. Triton Inference Server uses a number of runtime backends such as [TensorRT](https://developer.nvidia.com/tensorrt), [LibTorch](https://github.com/triton-inference-server/pytorch_backend) and [ONNX Runtime](https://github.com/triton-inference-server/onnxruntime_backend) to support various model types. Refer to the [Triton documentation](https://github.com/triton-inference-server/backend#where-can-i-find-all-the-backends-that-are-available-for-triton) for a list of available backends.
2. Configuration. The model is configured on Triton Inference Server, which generates the necessary [configuration files](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md).

After deployment, Triton Inference Server is used to evaluate the converted model in two steps:

1. Accuracy tests. Produce results which are tested against the given accuracy thresholds.
2. Performance tests. Produce latency and throughput results for offline (static batching) and online (dynamic batching) scenarios.

All steps are executed by the provided runner script. Refer to the [Quick Start Guide](#quick-start-guide).

## Setup

Ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PyTorch NGC container 21.10](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch)
* [Triton Inference Server NGC container 21.10](https://ngc.nvidia.com/catalog/containers/nvidia:tritonserver)
* [NVIDIA CUDA](https://docs.nvidia.com/cuda/archive//index.html)
* [NVIDIA Ampere](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/), [Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) or [Turing](https://www.nvidia.com/en-us/geforce/turing/) based GPU

## Quick Start Guide

Running the following scripts will build and launch the container with all required dependencies for native PyTorch as well as Triton Inference Server. This is necessary for running inference and can also be used for data download, processing, and training of the model.

1. Clone the repository.

```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PyTorch/LanguageModeling/BERT/
```

2. Build and run a container that extends NGC PyTorch with the Triton client libraries and necessary dependencies.

```
./triton/dist4l/scripts/docker/build.sh
./triton/dist4l/scripts/docker/interactive.sh
```

3. Prepare the dataset. The runner requires a script that downloads and prepares the publicly available datasets used in the process. The script downloads the necessary data to the `DeepLearningExamples/PyTorch/LanguageModeling/BERT/datasets` directory.

```
./triton/dist4l/runner/prepare_datasets.sh
```

4. Execute the runner script (note that a separate run script is provided for each NVIDIA GPU).

```
NVIDIA A30: ./triton/dist4l/runner/start_NVIDIA-A30.sh

NVIDIA DGX-1 (1x V100 32GB): ./triton/dist4l/runner/start_NVIDIA-DGX-1-\(1x-V100-32GB\).sh

NVIDIA DGX A100 (1x A100 80GB): ./triton/dist4l/runner/start_NVIDIA-DGX-A100-\(1x-A100-80GB\).sh

NVIDIA T4: ./triton/dist4l/runner/start_NVIDIA-T4.sh
```

## Performance

The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release.
For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Offline scenario The offline scenario assumes the client and server are located on the same host. The tests uses: - tensors are passed through shared memory between client and server, the Perf Analyzer flag `shared-memory=system` is used - single request is send from client to server with static size of batch #### Offline: NVIDIA A30, ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:----------------| | GPU | NVIDIA A30 | | Backend | ONNX Runtime | | Backend accelerator | - | | Precision | FP16 | | Model format | ONNX | | Max batch size | 16 | | Number of model instances | 1 | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.37 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 830.0 | 0.0 | 0.1 | 0.0 | 0.0 | 1.0 | 0.0 | 0.0 | 1.2 | 1.2 | 1.2 | 1.3 | 1.2 | | 8 | 1 | 2053.9 | 0.0 | 0.2 | 0.0 | 0.1 | 3.6 | 0.0 | 0.0 | 3.9 | 3.9 | 3.9 | 3.9 | 3.9 | | 16 | 1 | 2128.0 | 0.0 | 0.4 | 0.1 | 0.1 | 6.9 | 0.0 | 0.0 | 7.5 | 7.5 | 7.6 | 7.7 | 7.5 | #### Offline: NVIDIA A30, ONNX Runtime with FP16, Backend accelerator TensorRT Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA A30 | | Backend |ONNX Runtime | | Backend accelerator |NVIDIA TensorRT| | Precision |FP16 | | Model format |ONNX | | Max batch size |16 | | Number of model instances |1| | Accelerator Precision | FP16 | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.34 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 1219.0 | 0.0 | 0.2 | 0.0 | 0.1 | 0.5 | 0.0 | 0.0 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | | 8 | 1 | 3616.0 | 0.0 | 0.1 | 0.0 | 0.1 | 1.9 | 0.0 | 0.0 | 2.2 | 2.2 | 2.2 | 2.4 | 2.2 | | 16 | 1 | 4256.0 | 0.0 | 0.2 | 0.0 | 0.1 | 3.5 | 0.0 | 0.0 | 3.7 | 3.8 | 3.8 | 3.8 | 3.7 | #### Offline: NVIDIA A30, NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter 
Value | |:-----------------------------|:----------------| | GPU | NVIDIA A30 | | Backend | NVIDIA TensorRT | | Backend accelerator | - | | Precision | FP16 | | Model format | NVIDIA TensorRT | | Max batch size | 16 | | Number of model instances | 1 | | NVIDIA TensorRT Capture CUDA Graph | Disabled | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.34 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 1214.0 | 0.0 | 0.2 | 0.0 | 0.1 | 0.5 | 0.0 | 0.0 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | | 8 | 1 | 3456.0 | 0.0 | 0.2 | 0.0 | 0.1 | 2.0 | 0.0 | 0.0 | 2.3 | 2.3 | 2.4 | 2.5 | 2.3 | | 16 | 1 | 3968.0 | 0.0 | 0.4 | 0.0 | 0.1 | 3.5 | 0.0 | 0.0 | 4.0 | 4.0 | 4.0 | 4.2 | 4.0 | #### Offline: NVIDIA A30, PyTorch with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:------------------| | GPU | NVIDIA A30 | | Backend | PyTorch | | Backend accelerator | - | | Precision | FP16 | | Model format | TorchScript Trace | | Max batch size | 16 | | Number of model instances | 1 | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.35 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 558.0 | 0.0 | 0.1 | 0.0 | 0.0 | 1.6 | 0.0 | 0.0 | 1.8 | 1.8 | 1.8 | 1.8 | 1.8 | | 8 | 1 | 2061.9 | 0.0 | 0.3 | 0.0 | 0.1 | 1.6 | 1.9 | 0.0 | 3.9 | 3.9 | 3.9 | 3.9 | 3.9 | | 16 | 1 | 2429.6 | 0.0 | 0.2 | 0.0 | 0.1 | 1.6 | 4.7 | 0.0 | 6.6 | 6.6 | 6.6 | 6.6 | 6.6 | #### Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:----------------------------| | GPU | NVIDIA DGX-1 (1x V100 32GB) | | Backend | ONNX Runtime | | Backend accelerator | - | | Precision | FP16 | | Model format | ONNX | | Max batch size | 16 | | Number of model instances | 1 | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.37 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 
latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 521.0 | 0.0 | 0.3 | 0.1 | 0.1 | 1.4 | 0.0 | 0.0 | 1.9 | 2.1 | 2.1 | 2.2 | 1.9 | | 8 | 1 | 2048.0 | 0.0 | 0.3 | 0.0 | 0.1 | 3.5 | 0.0 | 0.0 | 3.9 | 3.9 | 3.9 | 4.0 | 3.9 | | 16 | 1 | 2304.0 | 0.0 | 0.3 | 0.1 | 0.1 | 6.4 | 0.0 | 0.0 | 6.9 | 7.1 | 7.2 | 7.3 | 6.9 | #### Offline: NVIDIA DGX-1 (1x V100 32GB), ONNX Runtime with FP16, Backend accelerator TensorRT Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:----------------------------| | GPU | NVIDIA DGX-1 (1x V100 32GB) | | Backend | ONNX Runtime | | Backend accelerator | NVIDIA TensorRT | | Precision | FP16 | | Model format | ONNX | | Max batch size | 16 | | Number of model instances | 1 | | Accelerator Precision | FP16 | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.34 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 789.0 | 0.0 | 0.3 | 0.1 | 0.1 | 0.8 | 0.0 | 0.0 | 1.2 | 1.4 | 1.5 | 1.6 | 1.2 | | 8 | 1 | 2536.0 | 0.0 | 0.3 | 0.1 | 0.1 | 2.6 | 0.0 | 0.0 | 3.1 | 3.3 | 3.4 | 3.5 | 3.1 | | 16 | 1 | 2992.0 | 0.0 | 0.3 | 0.0 | 0.1 | 4.9 | 0.0 | 0.0 | 5.3 | 5.5 | 5.6 | 5.6 | 5.3 | #### Offline: NVIDIA DGX-1 (1x V100 32GB), NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:----------------------------| | GPU | NVIDIA DGX-1 (1x V100 32GB) | | Backend | NVIDIA TensorRT | | Backend accelerator | - | | Precision | FP16 | | Model format | NVIDIA TensorRT | | Max batch size | 16 | | Number of model instances | 1 | | NVIDIA TensorRT Capture CUDA Graph | Disabled | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.34 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 803.0 | 0.0 | 0.3 | 0.1 | 0.2 | 0.7 | 0.0 | 0.0 
| 1.2 | 1.3 | 1.3 | 1.4 | 1.2 | | 8 | 1 | 2576.0 | 0.0 | 0.2 | 0.1 | 0.2 | 2.6 | 0.0 | 0.0 | 3.1 | 3.1 | 3.1 | 3.2 | 3.1 | | 16 | 1 | 2928.0 | 0.0 | 0.3 | 0.1 | 0.2 | 4.8 | 0.0 | 0.0 | 5.4 | 5.5 | 5.5 | 5.6 | 5.4 | #### Offline: NVIDIA DGX-1 (1x V100 32GB), PyTorch with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:----------------------------| | GPU | NVIDIA DGX-1 (1x V100 32GB) | | Backend | PyTorch | | Backend accelerator | - | | Precision | FP16 | | Model format | TorchScript Trace | | Max batch size | 16 | | Number of model instances | 1 | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.35 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 325.0 | 0.0 | 0.3 | 0.0 | 0.1 | 2.6 | 0.0 | 0.0 | 3.0 | 3.3 | 3.4 | 3.5 | 3.1 | | 8 | 1 | 2200.0 | 0.0 | 0.3 | 0.0 | 0.1 | 2.6 | 0.7 | 0.0 | 3.6 | 3.6 | 3.6 | 3.7 | 3.6 | | 16 | 1 | 2784.0 | 0.0 | 0.2 | 0.0 | 0.1 | 2.5 | 2.8 | 0.0 | 5.7 | 5.7 | 5.8 | 5.8 | 5.7 | #### Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-------------------------------| | GPU | NVIDIA DGX A100 (1x A100 80GB) | | Backend | ONNX Runtime | | Backend accelerator | - | | Precision | FP16 | | Model format | ONNX | | Max batch size | 16 | | Number of model instances | 1 | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.37 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 679.0 | 0.0 | 0.1 | 0.0 | 0.1 | 1.3 | 0.0 | 0.0 | 1.5 | 1.5 | 1.5 | 1.5 | 1.5 | | 8 | 1 | 3360.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.2 | 0.0 | 0.0 | 2.4 | 2.4 | 2.4 | 2.5 | 2.4 | | 16 | 1 | 4032.0 | 0.0 | 0.1 | 0.0 | 0.1 | 3.7 | 0.0 | 0.0 | 4.0 | 4.0 | 4.0 | 4.1 | 4.0 | #### Offline: NVIDIA DGX A100 (1x A100 80GB), ONNX Runtime with FP16, Backend accelerator TensorRT Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-------------------------------| | GPU | NVIDIA DGX A100 (1x A100 80GB) | | Backend | ONNX Runtime | | Backend accelerator | NVIDIA TensorRT | | 
Precision | FP16 | | Model format | ONNX | | Max batch size | 16 | | Number of model instances | 1 | | Accelerator Precision | FP16 | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.35 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 1471.0 | 0.0 | 0.1 | 0.0 | 0.1 | 0.5 | 0.0 | 0.0 | 0.7 | 0.7 | 0.7 | 0.7 | 0.7 | | 8 | 1 | 5648.0 | 0.0 | 0.1 | 0.0 | 0.1 | 1.2 | 0.0 | 0.0 | 1.4 | 1.4 | 1.4 | 1.4 | 1.4 | | 16 | 1 | 6960.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.1 | 0.0 | 0.0 | 2.3 | 2.3 | 2.3 | 2.4 | 2.3 | #### Offline: NVIDIA DGX A100 (1x A100 80GB), NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-------------------------------| | GPU | NVIDIA DGX A100 (1x A100 80GB) | | Backend | NVIDIA TensorRT | | Backend accelerator | - | | Precision | FP16 | | Model format | NVIDIA TensorRT | | Max batch size | 16 | | Number of model instances | 1 | | NVIDIA TensorRT Capture CUDA Graph | Disabled | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.35 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 1561.0 | 0.0 | 0.1 | 0.0 | 0.1 | 0.5 | 0.0 | 0.0 | 0.6 | 0.6 | 0.7 | 0.7 | 0.6 | | 8 | 1 | 5096.0 | 0.0 | 0.1 | 0.0 | 0.1 | 1.4 | 0.0 | 0.0 | 1.5 | 1.5 | 1.5 | 5.2 | 1.6 | | 16 | 1 | 6544.0 | 0.0 | 0.1 | 0.0 | 0.1 | 2.3 | 0.0 | 0.0 | 2.3 | 2.3 | 2.4 | 6.5 | 2.4 | #### Offline: NVIDIA DGX A100 (1x A100 80GB), PyTorch with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-------------------------------| | GPU | NVIDIA DGX A100 (1x A100 80GB) | | Backend | PyTorch | | Backend accelerator | - | | Precision | FP16 | | Model format | TorchScript Trace | | Max batch size | 16 | | Number of model instances | 1 | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.35 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) 
| avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 482.0 | 0.0 | 0.1 | 0.0 | 0.0 | 1.9 | 0.0 | 0.0 | 2.1 | 2.1 | 2.1 | 2.1 | 2.1 | | 8 | 1 | 3368.0 | 0.0 | 0.1 | 0.0 | 0.0 | 1.7 | 0.5 | 0.0 | 2.4 | 2.4 | 2.4 | 2.5 | 2.4 | | 16 | 1 | 4272.0 | 0.0 | 0.1 | 0.0 | 0.1 | 1.7 | 1.8 | 0.0 | 3.7 | 3.8 | 3.8 | 3.8 | 3.7 | #### Offline: NVIDIA T4, ONNX Runtime with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:----------------| | GPU | NVIDIA T4 | | Backend | ONNX Runtime | | Backend accelerator | - | | Precision | FP16 | | Model format | ONNX | | Max batch size | 16 | | Number of model instances | 1 | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.37 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 488.5 | 0.0 | 0.5 | 0.1 | 0.1 | 1.4 | 0.0 | 0.0 | 2.0 | 2.1 | 2.2 | 2.3 | 2.0 | | 8 | 1 | 792.0 | 0.0 | 0.5 | 0.1 | 0.1 | 9.4 | 0.0 | 0.0 | 10.1 | 10.2 | 10.2 | 10.2 | 10.1 | | 16 | 1 | 816.0 | 0.0 | 0.5 | 0.1 | 0.1 | 18.8 | 0.0 | 0.0 | 19.5 | 19.6 | 19.7 | 19.7 | 19.5 | #### Offline: NVIDIA T4, ONNX Runtime with FP16, Backend accelerator TensorRT Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:-----------------------------| | GPU |NVIDIA T4 | | Backend |ONNX Runtime | | Backend accelerator |NVIDIA TensorRT| | Precision |FP16 | | Model format |ONNX | | Max batch size |16 | | Number of model instances |1| | Accelerator Precision | FP16 | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.34 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 749.0 | 0.0 | 0.4 | 0.0 | 0.1 | 0.8 | 0.0 | 0.0 | 1.3 | 1.4 | 1.5 | 1.5 | 1.3 | | 8 | 1 | 1536.0 | 0.0 | 0.6 | 0.1 | 0.1 | 4.5 | 0.0 | 0.0 | 5.2 | 5.3 | 5.4 | 5.4 | 5.2 | | 16 | 1 | 1648.0 | 0.0 | 0.5 | 0.1 | 0.1 | 9.0 | 0.0 | 0.0 | 9.6 | 9.9 | 9.9 | 10.0 | 9.7 | 
#### Offline: NVIDIA T4, NVIDIA TensorRT with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:----------------| | GPU | NVIDIA T4 | | Backend | NVIDIA TensorRT | | Backend accelerator | - | | Precision | FP16 | | Model format | NVIDIA TensorRT | | Max batch size | 16 | | Number of model instances | 1 | | NVIDIA TensorRT Capture CUDA Graph | Disabled | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.34 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 798.2 | 0.0 | 0.4 | 0.0 | 0.1 | 0.7 | 0.0 | 0.0 | 1.2 | 1.4 | 1.5 | 1.5 | 1.2 | | 8 | 1 | 1544.0 | 0.0 | 0.5 | 0.0 | 0.1 | 4.5 | 0.0 | 0.0 | 5.1 | 5.2 | 5.2 | 5.3 | 5.1 | | 16 | 1 | 1632.0 | 0.0 | 0.5 | 0.0 | 0.1 | 9.0 | 0.0 | 0.0 | 9.7 | 10.0 | 10.0 | 10.1 | 9.7 | #### Offline: NVIDIA T4, PyTorch with FP16 Our results were obtained using the following configuration: | Parameter Name | Parameter Value | |:-----------------------------|:------------------| | GPU | NVIDIA T4 | | Backend | PyTorch | | Backend accelerator | - | | Precision | FP16 | | Model format | TorchScript Trace | | Max batch size | 16 | | Number of model instances | 1 | | Accelerator Precision | - | | Max Seq Length | 384 | | SQuAD v1.1 F1 Score | 83.35 | <summary>Results Table</summary> | Batch | Concurrency | Inferences/Second | Client Send (ms) | Network+Server Send/Recv (ms) | Server Queue (ms) | Server Compute Input (ms) | Server Compute Infer (ms) | Server Compute Output (ms) | Client Recv (ms) | p50 latency (ms) | p90 latency (ms) | p95 latency (ms) | p99 latency (ms) | avg latency (ms) | |--------:|--------------:|--------------------:|-------------------:|--------------------------------:|--------------------:|----------------------------:|----------------------------:|-----------------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:|-------------------:| | 1 | 1 | 447.0 | 0.0 | 0.5 | 0.1 | 0.0 | 1.6 | 0.0 | 0.0 | 2.2 | 2.2 | 2.3 | 2.8 | 2.2 | | 8 | 1 | 928.0 | 0.0 | 0.5 | 0.1 | 0.1 | 1.6 | 6.3 | 0.0 | 8.6 | 8.7 | 8.7 | 8.8 | 8.6 | | 16 | 1 | 944.0 | 0.0 | 0.4 | 0.1 | 0.1 | 1.5 | 14.6 | 0.0 | 16.6 | 17.0 | 17.1 | 17.2 | 16.7 | ## Advanced ### Prepare configuration You can use the environment variables to set the parameters of your inference configuration. 
Triton deployment scripts support several inference runtimes listed in the table below: Example values of some key variables in one configuration: ``` FORMAT="onnx" PRECISION="fp16" EXPORT_FORMAT="onnx" EXPORT_PRECISION="fp16" ACCELERATOR="trt" ACCELERATOR_PRECISION="fp16" CAPTURE_CUDA_GRAPH="0" BATCH_SIZE="16" MAX_BATCH_SIZE="16" MAX_SEQ_LENGTH="384" CHECKPOINT_VARIANT="dist-4l-qa" CHECKPOINT_DIR=${CHECKPOINTS_DIR}/${CHECKPOINT_VARIANT} TRITON_MAX_QUEUE_DELAY="1" TRITON_GPU_ENGINE_COUNT="1" TRITON_PREFERRED_BATCH_SIZES="1" ``` | Inference runtime | Mnemonic used in scripts | |-------------------|--------------------------| | [TorchScript Tracing](https://pytorch.org/docs/stable/jit.html) | `ts-trace` | | [TorchScript Scripting](https://pytorch.org/docs/stable/jit.html) | `ts-script` | | [ONNX](https://onnx.ai) | `onnx` | | [NVIDIA TensorRT](https://developer.nvidia.com/tensorrt) | `trt` | The deployment process consists of the following steps. 1. Export step. We export the model into the format set by `${EXPORT_FORMAT}`, with precision set by `${EXPORT_PRECISION}`. 2. Convert step. We convert the exported model from `${EXPORT_FORMAT}` into `${FORMAT}`. The precision of the model in `${FORMAT}` is set by `${PRECISION}`. 3. Deploy step. We create the triton model repository. The most common use-case scenario is to export the model into ONNX format, and then convert it into TensorRT. `${ACCELERATOR}` here refers to the accelerator of the ONNX format, which can be either `trt` or `none`. All the above values are set in the `triton/scripts/setup_parameters.sh` file. ### Step by step deployment process Commands described below can be used for exporting, converting and profiling the model. #### Clone Repository IMPORTANT: This step is executed on the host computer. <details> <summary>Clone Repository Command</summary> ```shell git clone https://github.com/NVIDIA/DeepLearningExamples.git cd DeepLearningExamples/PyTorch/LanguageModeling/BERT/ ``` </details> #### Setup Environment Setup the environment in the host computer and start Triton Inference Server. <details> <summary>Setup Environment Command</summary> ```shell source ./triton/dist4l/scripts/setup_environment.sh ./triton/dist4l/scripts/docker/triton_inference_server.sh ``` </details> #### Setup Container Build and run a container that extends the NGC PyTorch container with the Triton Inference Server client libraries and dependencies. <details> <summary>Setup Container Command</summary> ```shell ./triton/dist4l/scripts/docker/build.sh ./triton/dist4l/scripts/docker/interactive.sh ``` </details> #### Setup Parameters and Environment Setup the environment and deployment parameters inside interactive container. <details> <summary>Setup Environment Command</summary> ```shell source ./triton/dist4l/scripts/setup_environment.sh ``` </details> <details> <summary>Setup Parameters Command</summary> ```shell source ./triton/dist4l/scripts/setup_parameters.sh ``` </details> #### Prepare Dataset and Checkpoint Prepare datasets and checkpoint if not run automatic evaluation scripts. 
<details> <summary>Prepare Datasets Command</summary> ```shell ./triton/dist4l/runner/prepare_datasets.sh ``` </details> <details> <summary>Prepare Checkpoint Command</summary> Download checkpoint from ``` https://catalog.ngc.nvidia.com/orgs/nvidia/dle/models/bert_pyt_ckpt_distilled_4l_288d_qa_squad11_amp ``` Create the directory for checkpoint and copy the downloaded checkpoint content: ```shell mkdir -p ${CHECKPOINTS_DIR}/dist-4l-qa ``` </details> #### Export Model Export model from Python source to desired format (e.g. Savedmodel or TorchScript) <details> <summary>Export Model Command</summary> ```shell python3 triton/export_model.py \ --input-path triton/model.py \ --input-type pyt \ --output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-type ${EXPORT_FORMAT} \ --dataloader triton/dataloader.py \ --ignore-unknown-parameters \ --onnx-opset 13 \ ${FLAG} \ \ --config-file ${CHECKPOINT_DIR}/config.json \ --checkpoint ${CHECKPOINT_DIR}/pytorch_model.bin \ --precision ${EXPORT_PRECISION} \ \ --vocab-file ${CHECKPOINT_DIR}/vocab.txt \ --max-seq-length ${MAX_SEQ_LENGTH} \ --predict-file ${DATASETS_DIR}/data/squad/v1.1/dev-v1.1.json \ --batch-size ${MAX_BATCH_SIZE} ``` </details> #### Convert Model Convert the model from training to inference format (e.g. TensorRT). <details> <summary>Convert Model Command</summary> ```shell model-navigator convert \ --model-name ${MODEL_NAME} \ --model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \ --output-path ${SHARED_DIR}/converted_model \ --target-formats ${FORMAT} \ --target-precisions ${PRECISION} \ --launch-mode local \ --override-workspace \ --verbose \ \ --onnx-opsets 13 \ --inputs input__0:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \ --inputs input__1:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \ --inputs input__2:${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH}:int32 \ --min-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ --max-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ --opt-shapes input__0=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__1=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ input__2=${MAX_BATCH_SIZE},${MAX_SEQ_LENGTH} \ --max-batch-size ${MAX_BATCH_SIZE} \ --tensorrt-max-workspace-size 8589934592 \ --atol 2 output__0=5.0 \ output__1=5.0 \ --rtol 1 output__0=5.0 \ output__1=5.0 ``` </details> #### Deploy Model Configure the model on Triton Inference Server. Generate the configuration from your model repository. <details> <summary>Deploy Model Command</summary> ```shell model-navigator triton-config-model \ --model-repository ${MODEL_REPOSITORY_PATH} \ --model-name ${MODEL_NAME} \ --model-version 1 \ --model-path ${SHARED_DIR}/converted_model \ --model-format ${CONFIG_FORMAT} \ --model-control-mode ${TRITON_LOAD_MODEL_METHOD} \ --verbose \ --load-model \ --load-model-timeout-s 100 \ \ --backend-accelerator ${ACCELERATOR} \ --tensorrt-precision ${ACCELERATOR_PRECISION} \ --max-batch-size ${MBS} \ --preferred-batch-sizes ${TRITON_PREFERRED_BATCH_SIZES} \ --max-queue-delay-us ${TRITON_MAX_QUEUE_DELAY} \ --engine-count-per-device gpu=${TRITON_GPU_ENGINE_COUNT} ``` </details> #### Prepare Triton Profiling Data Prepare data used for profiling on Triton server. 
<details>
<summary>Prepare Triton Profiling Data Command</summary>

```shell
mkdir -p ${SHARED_DIR}/input_data

python triton/prepare_input_data.py \
    --dataloader triton/dataloader.py \
    --input-data-dir ${SHARED_DIR}/input_data \
    \
    --batch-size ${MAX_BATCH_SIZE} \
    --max-seq-length ${MAX_SEQ_LENGTH} \
    --predict-file ${DATASETS_DIR}/data/squad/v1.1/dev-v1.1.json \
    --vocab-file ${CHECKPOINT_DIR}/vocab.txt
```

</details>

#### Triton Performance Offline Test

This test aims to maximize throughput. It assumes you have your data available for inference or that your data saturates the maximum batch size quickly. Triton Inference Server supports offline scenarios with static batching. Static batching allows inference requests to be served as they are received. The largest improvements to throughput come from increasing the batch size due to efficiency gains in the GPU with larger batches.

<details>
<summary>Triton Performance Offline Test Command</summary>

```shell
python triton/run_performance_on_triton.py \
    --model-repository ${MODEL_REPOSITORY_PATH} \
    --model-name ${MODEL_NAME} \
    --input-data ${SHARED_DIR}/input_data/data.json \
    --input-shapes input__0:${MAX_SEQ_LENGTH} \
    --input-shapes input__1:${MAX_SEQ_LENGTH} \
    --input-shapes input__2:${MAX_SEQ_LENGTH} \
    --batch-sizes ${BATCH_SIZE} \
    --number-of-triton-instances ${TRITON_INSTANCES} \
    --number-of-model-instances ${TRITON_GPU_ENGINE_COUNT} \
    --batching-mode static \
    --evaluation-mode offline \
    --performance-tool perf_analyzer \
    --result-path ${SHARED_DIR}/triton_performance_offline.csv
```

</details>

### Latency explanation

A typical Triton Inference Server pipeline can be broken down into the following steps:

1. The client serializes the inference request into a message and sends it to the server (Client Send).
2. The message travels over the network from the client to the server (Network).
3. The message arrives at the server and is deserialized (Server Receive).
4. The request is placed on the queue (Server Queue).
5. The request is removed from the queue and computed (Server Compute).
6. The completed request is serialized in a message and sent back to the client (Server Send).
7. The completed message then travels over the network from the server to the client (Network).
8. The completed message is deserialized by the client and processed as a completed inference request (Client Receive).

Generally, for local clients, steps 1-4 and 6-8 occupy only a small fraction of time compared to step 5. As backend deep learning systems like BERT are rarely exposed directly to end users, and instead only interface with local front-end servers, we can consider all clients local for this model.

## Release Notes

We’re constantly refining and improving our performance on AI and HPC workloads even on the same hardware with frequent updates to our software stack. For our latest performance data refer to these pages for [AI](https://developer.nvidia.com/deep-learning-performance-training-inference) and [HPC](https://developer.nvidia.com/hpc-application-performance) benchmarks.

### Changelog

### Known issues

- There are no known issues with this model.
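As a quick sanity check of the model deployed in the steps above, the following minimal Python sketch sends one request through the Triton HTTP client. It is not part of the deployment scripts: the server address, the resolved value of `${MODEL_NAME}`, and the zero-valued dummy inputs are assumptions, while the tensor names (`input__0/1/2`, `output__0/1`) and sequence length follow the convert and deploy steps above.

```python
import numpy as np
import tritonclient.http as triton_http

MODEL_NAME = "bertqa"      # assumption: whatever ${MODEL_NAME} resolved to
MAX_SEQ_LENGTH = 384

client = triton_http.InferenceServerClient(url="localhost:8000")

# Dummy token ids / segment ids / attention mask. A real client would build
# these with the same vocabulary and tokenization used during training.
dummy = np.zeros((1, MAX_SEQ_LENGTH), dtype=np.int32)

inputs = []
for name in ("input__0", "input__1", "input__2"):
    tensor = triton_http.InferInput(name, list(dummy.shape), "INT32")
    tensor.set_data_from_numpy(dummy)
    inputs.append(tensor)

outputs = [
    triton_http.InferRequestedOutput("output__0"),
    triton_http.InferRequestedOutput("output__1"),
]

result = client.infer(model_name=MODEL_NAME, inputs=inputs, outputs=outputs)
print(result.as_numpy("output__0").shape)  # start logits
print(result.as_numpy("output__1").shape)  # end logits
```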
TensorFlow2/Segmentation/Contrib/UNet3P
UNet3P
evaluate
""" Evaluation script used to calculate accuracy of trained model """ import os import hydra from omegaconf import DictConfig import tensorflow as tf from tensorflow.keras import mixed_precision from data_generators import data_generator from utils.general_utils import join_paths, set_gpus, suppress_warnings from models.model import prepare_model from losses.loss import DiceCoefficient from losses.unet_loss import unet3p_hybrid_loss def evaluate(cfg: DictConfig): """ Evaluate or calculate accuracy of given model """ # suppress TensorFlow and DALI warnings suppress_warnings() if cfg.USE_MULTI_GPUS.VALUE: # change number of visible gpus for evaluation set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS) # update batch size according to available gpus data_generator.update_batch_size(cfg) if cfg.OPTIMIZATION.AMP: print("Enabling Automatic Mixed Precision(AMP) training") policy = mixed_precision.Policy('mixed_float16') mixed_precision.set_global_policy(policy) if cfg.OPTIMIZATION.XLA: print("Enabling Automatic Mixed Precision(XLA) training") tf.config.optimizer.set_jit(True) # create model strategy = None if cfg.USE_MULTI_GPUS.VALUE: # multi gpu training using tensorflow mirrored strategy strategy = tf.distribute.MirroredStrategy( cross_device_ops=tf.distribute.HierarchicalCopyAllReduce() ) print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync)) with strategy.scope(): optimizer = tf.keras.optimizers.Adam( learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE ) # optimizer if cfg.OPTIMIZATION.AMP: optimizer = mixed_precision.LossScaleOptimizer( optimizer, dynamic=True ) dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES) dice_coef = tf.keras.metrics.MeanMetricWrapper(name="dice_coef", fn=dice_coef) model = prepare_model(cfg, training=True) else: optimizer = tf.keras.optimizers.Adam( learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE ) # optimizer if cfg.OPTIMIZATION.AMP: optimizer = mixed_precision.LossScaleOptimizer( optimizer, dynamic=True ) dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES) dice_coef = tf.keras.metrics.MeanMetricWrapper(name="dice_coef", fn=dice_coef) model = prepare_model(cfg, training=True) model.compile( optimizer=optimizer, loss=unet3p_hybrid_loss, metrics=[dice_coef], ) # weights model path checkpoint_path = join_paths( cfg.WORK_DIR, cfg.CALLBACKS.MODEL_CHECKPOINT.PATH, f"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5" ) assert os.path.exists(checkpoint_path), \ f"Model weight's file does not exist at \n{checkpoint_path}" # TODO: verify without augment it produces same results # load model weights model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True) model.summary() # data generators val_generator = data_generator.get_data_generator(cfg, "VAL", strategy) validation_steps = data_generator.get_iterations(cfg, mode="VAL") # evaluation metric evaluation_metric = "dice_coef" if len(model.outputs) > 1: evaluation_metric = f"{model.output_names[0]}_dice_coef" result = model.evaluate( x=val_generator, steps=validation_steps, workers=cfg.DATALOADER_WORKERS, return_dict=True, ) # return computed loss, validation accuracy, and it's metric name return result, evaluation_metric @hydra.main(version_base=None, config_path="configs", config_name="config") def main(cfg: DictConfig): """ Read config file and pass to evaluate method """ result, evaluation_metric = evaluate(cfg) print(result) print(f"Validation dice coefficient: {result[evaluation_metric]}") if __name__ == "__main__": main()
PyTorch/DrugDiscovery/MoFlow/scripts
scripts
benchmark_training
#!/bin/bash # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2020 Chengxi Zang # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. gpus=${1:-1} bs=${2:-512} prec=${3:-amp} flags="${@:4}" if [[ "${gpus}" == "1" ]]; then cmd="python" else cmd="torchrun --nproc_per_node=${gpus}" fi cmd="${cmd} \ /workspace/moflow_pyt/moflow/runtime/train.py \ --batch_size ${bs} \ --steps 200 \ --eval_epochs -1 \ --save_epochs -1 \ --cuda_graph \ ${flags} \ " if [ $prec == "amp" ]; then cmd="${cmd} --amp" fi set -x bash -c "${cmd}"
PyTorch/SpeechSynthesis/Tacotron2/notebooks/triton
triton
waveglow_tensorrt_config
name: "waveglow-tensorrt" platform: "tensorrt_plan" default_model_filename: "model.plan" max_batch_size: 0 input { name: "mel" data_type: TYPE_FP16 dims: [1, 80, -1, 1] } input { name: "z" data_type: TYPE_FP16 dims: [1, 8, -1, 1] } output { name: "audio" data_type: TYPE_FP16 dims: [1, -1] }
PyTorch/Segmentation/MaskRCNN/pytorch/configs/quick_schedules
quick_schedules
e2e_mask_rcnn_X_101_32x8d_FPN_quick
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://ImageNetPretrained/FAIR/20171220/X-101-32x8d" BACKBONE: CONV_BODY: "R-101-FPN" OUT_CHANNELS: 256 RPN: USE_FPN: True ANCHOR_STRIDE: (4, 8, 16, 32, 64) PRE_NMS_TOP_N_TRAIN: 2000 PRE_NMS_TOP_N_TEST: 1000 POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TEST: 1000 ROI_HEADS: USE_FPN: True BATCH_SIZE_PER_IMAGE: 256 ROI_BOX_HEAD: POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" PREDICTOR: "FPNPredictor" ROI_MASK_HEAD: POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" PREDICTOR: "MaskRCNNC4Predictor" POOLER_RESOLUTION: 14 POOLER_SAMPLING_RATIO: 2 RESOLUTION: 28 SHARE_BOX_FEATURE_EXTRACTOR: False RESNETS: STRIDE_IN_1X1: False NUM_GROUPS: 32 WIDTH_PER_GROUP: 8 MASK_ON: True DATASETS: TRAIN: ("coco_2014_minival",) TEST: ("coco_2014_minival",) INPUT: MIN_SIZE_TRAIN: 600 MAX_SIZE_TRAIN: 1000 MIN_SIZE_TEST: 800 MAX_SIZE_TEST: 1000 DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: BASE_LR: 0.005 WEIGHT_DECAY: 0.0001 STEPS: (1500,) MAX_ITER: 2000 IMS_PER_BATCH: 2 TEST: IMS_PER_BATCH: 2
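This quick-schedule file is consumed through the repository's yacs-based configuration inherited from maskrcnn-benchmark. A minimal sketch of merging it over the library defaults, assuming the upstream `maskrcnn_benchmark.config` module layout is kept:

```python
from maskrcnn_benchmark.config import cfg  # assumption: upstream config module is kept

# Merge the quick-schedule YAML on top of the built-in defaults.
cfg.merge_from_file(
    "configs/quick_schedules/e2e_mask_rcnn_X_101_32x8d_FPN_quick.yaml")
# Optional command-line style overrides, then freeze to catch typos later.
cfg.merge_from_list(["SOLVER.IMS_PER_BATCH", 4])
cfg.freeze()

print(cfg.MODEL.META_ARCHITECTURE)  # GeneralizedRCNN
print(cfg.SOLVER.BASE_LR)           # 0.005, from the YAML above
```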
TensorFlow/Translation/GNMT/variable_mgr
variable_mgr
constants
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Constants used in tf_cnn_benchmarks.""" from enum import Enum class NetworkTopology(str, Enum): """Network topology describes how multiple GPUs are inter-connected. """ # DGX-1 uses hybrid cube mesh topology with the following device peer to peer # matrix: # DMA: 0 1 2 3 4 5 6 7 # 0: Y Y Y Y Y N N N # 1: Y Y Y Y N Y N N # 2: Y Y Y Y N N Y N # 3: Y Y Y Y N N N Y # 4: Y N N N Y Y Y Y # 5: N Y N N Y Y Y Y # 6: N N Y N Y Y Y Y # 7: N N N Y Y Y Y Y DGX1 = "dgx1" # V100 in GCP are connected with the following device peer to peer matrix. # In this topology, bandwidth of the connection depends on if it uses NVLink # or PCIe link. # DMA: 0 1 2 3 4 5 6 7 # 0: Y Y Y Y N Y N N # 1: Y Y Y Y N N N N # 2: Y Y Y Y N N N Y # 3: Y Y Y Y N N N N # 4: N N N N Y Y Y Y # 5: Y N N N Y Y Y Y # 6: N N N N Y Y Y Y # 7: N N Y N Y Y Y Y GCP_V100 = "gcp_v100"
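

if __name__ == "__main__":
    # Small usage demonstration (added example, not part of the original
    # file). Because NetworkTopology subclasses both str and Enum, its
    # members compare equal to their raw string values, so they can be used
    # interchangeably with plain topology strings such as flag values.
    assert NetworkTopology.DGX1 == "dgx1"
    assert NetworkTopology("gcp_v100") is NetworkTopology.GCP_V100
    print(NetworkTopology.DGX1.value)  # prints: dgx1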
TensorFlow/Detection/SSD/models/research/object_detection/metrics
metrics
io_utils
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Common IO utils used in offline metric computation. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import csv def write_csv(fid, metrics): """Writes metrics key-value pairs to CSV file. Args: fid: File identifier of an opened file. metrics: A dictionary with metrics to be written. """ metrics_writer = csv.writer(fid, delimiter=',') for metric_name, metric_value in metrics.items(): metrics_writer.writerow([metric_name, str(metric_value)])
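

if __name__ == "__main__":
    # Usage sketch (added example, not part of the original module): write a
    # small metrics dictionary to disk with write_csv(). The metric names and
    # values below are purely illustrative. On Python 3, passing newline=''
    # to open() avoids extra blank lines between rows on some platforms.
    example_metrics = {
        'mAP@0.5IOU': 0.7312,
        'AP@0.5IOU/person': 0.5098,
    }
    with open('example_metrics.csv', 'w') as fid:
        write_csv(fid, example_metrics)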
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular
tabular
__init__
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# flake8: noqa
from .base_tabular_generator import BaseTabularGenerator
from .chunked_tabular_generator import ChunkedBaseTabularGenerator
from .ctgan import CTGANGenerator
from .gaussian_generator import GaussianGenerator
from .kde_generator import KDEGenerator
from .random import RandomMVGenerator
from .uniform_generator import UniformGenerator

# Registry of the available tabular generators, including CTGAN.
tabular_generators_classes = {
    'kde': KDEGenerator,
    'random': RandomMVGenerator,
    'gaussian': GaussianGenerator,
    'uniform': UniformGenerator,
    'ctgan': CTGANGenerator,
}

# Reverse lookup: maps a generator class name (e.g. "KDEGenerator") back to
# its registry key (e.g. "kde"). Note that `cls` is already a class here, so
# `cls.__name__` (not `cls.__class__.__name__`, which would return the
# metaclass name) gives the intended class name.
tabular_generators_types_to_classes = {
    cls.__name__: k
    for k, cls in tabular_generators_classes.items()
}
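A tiny usage sketch (not part of the package) of the registries defined above, assuming the package is importable as `syngen.generator.tabular` per this directory layout. Constructor and fit/sample signatures differ per generator, so only the lookup itself is shown.

```python
from syngen.generator.tabular import (
    tabular_generators_classes,
    tabular_generators_types_to_classes,
)

generator_cls = tabular_generators_classes['gaussian']
print(generator_cls.__name__)                                     # GaussianGenerator

# Reverse lookup from class name back to registry key.
print(tabular_generators_types_to_classes['GaussianGenerator'])   # gaussian
```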
PyTorch/Forecasting/TFT/triton/runner
runner
config_NVIDIA-T4
checkpoints: - name: electricity_bin url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/tft_base_pyt_ckpt_ds-electricity/versions/22.11.0_amp/zip - name: traffic_bin url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/tft_base_pyt_ckpt_ds-traffic/versions/22.11.0_amp/zip configurations: - accelerator: none batch_size: - 1 - 2 - 4 - 8 - 16 - 32 - 64 - 128 - 256 - 512 - 1024 batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024 capture_cuda_graph: 0 checkpoint_variant: electricity_bin dataset: electricity_bin device: gpu export_format: onnx export_precision: fp32 format: trt max_batch_size: 1024 precision: fp16 request_count: 500 triton_gpu_engine_count: 2 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 512 1024 - accelerator: none batch_size: - 1 - 2 - 4 - 8 - 16 - 32 - 64 - 128 - 256 - 512 - 1024 batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024 capture_cuda_graph: 0 checkpoint_variant: traffic_bin dataset: traffic_bin device: gpu export_format: onnx export_precision: fp32 format: trt max_batch_size: 1024 precision: fp16 request_count: 500 triton_gpu_engine_count: 2 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 512 1024 - accelerator: none batch_size: - 1 - 2 - 4 - 8 - 16 - 32 - 64 - 128 - 256 - 512 - 1024 batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024 capture_cuda_graph: 0 checkpoint_variant: electricity_bin dataset: electricity_bin device: gpu export_format: ts-trace export_precision: fp32 format: ts-trace max_batch_size: 1024 precision: fp16 request_count: 500 triton_gpu_engine_count: 2 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 512 1024 - accelerator: none batch_size: - 1 - 2 - 4 - 8 - 16 - 32 - 64 - 128 - 256 - 512 - 1024 batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024 capture_cuda_graph: 0 checkpoint_variant: traffic_bin dataset: traffic_bin device: gpu export_format: ts-trace export_precision: fp32 format: ts-trace max_batch_size: 1024 precision: fp16 request_count: 500 triton_gpu_engine_count: 2 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 512 1024 container_version: '22.11' datasets: - name: electricity_bin - name: traffic_bin datasets_dir: datasets framework: PyTorch model_name: TFT triton_container_image: null triton_custom_operations: null triton_dockerfile: null triton_load_model_method: explicit
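A minimal sketch (not part of the runner) of how a deployment configuration like this can be inspected: load the YAML with PyYAML and walk the `checkpoints` and `configurations` lists. The file path below is an assumption based on this file's location in the repository.

```python
import yaml

with open("triton/runner/config_NVIDIA-T4.yaml") as f:
    config = yaml.safe_load(f)

print("container version:", config["container_version"])
for checkpoint in config["checkpoints"]:
    print("checkpoint:", checkpoint["name"], "->", checkpoint["url"])

for variant in config["configurations"]:
    print(
        f"dataset={variant['dataset']} "
        f"export={variant['export_format']}/{variant['export_precision']} "
        f"deploy={variant['format']}/{variant['precision']} "
        f"max_batch_size={variant['max_batch_size']}"
    )
```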
TensorFlow2/Recommendation/SIM/sim/layers
layers
item_sequence_interaction
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from sim.layers.item_item_interaction import DIENAttentionUnit from sim.layers.rnn import AUGRU @tf.function def compute_item_sequence_attention(item, sequence, mask, attention_op): """ Computes normalized attention scores between a given sequence and item """ scores_unnormalized = attention_op((sequence, tf.expand_dims(item, axis=1))) if mask is not None: min_value_for_dtype = scores_unnormalized.dtype.min mask = tf.equal(mask, tf.ones_like(mask)) paddings = tf.ones_like(scores_unnormalized) * min_value_for_dtype scores_unnormalized = tf.where(mask, scores_unnormalized, paddings) # [B, 1, T] scores = tf.nn.softmax(scores_unnormalized) return scores class DINItemSequenceInteractionBlock(tf.keras.layers.Layer): def __init__(self, item_item_interaction): super(DINItemSequenceInteractionBlock, self).__init__() self.item_item_interaction = item_item_interaction @tf.function def call(self, inputs): item, item_sequence, mask = inputs # compute attention scores between item_sequence and item scores = compute_item_sequence_attention( item, item_sequence, mask, self.item_item_interaction ) # equivalent to tf.matmul(scores[:,None,:], item_sequence) return ( tf.reduce_sum(tf.expand_dims(scores, axis=-1) * item_sequence, [1]), scores, ) class DIENItemSequenceInteractionBlock(tf.keras.layers.Layer): def __init__(self, hidden_size: int): super(DIENItemSequenceInteractionBlock, self).__init__() self.hidden_size = hidden_size # hidden=emb_dim*6 self.item_item_interaction = DIENAttentionUnit(self.hidden_size) self.layer_1 = tf.keras.layers.GRU(self.hidden_size, return_sequences=True) self.layer_2 = AUGRU(self.hidden_size) @tf.function def call(self, inputs): """ Returns: - final_seq_repr: final vector representation of the sequence - features_layer_1: for auxiliary loss """ item, item_sequence, mask = inputs # compute h(1),...,h(T) from e(1),...,e(T) features_layer_1 = self.layer_1(item_sequence) # compute attention scores between features_layer_1 and item attention_scores = compute_item_sequence_attention( item, features_layer_1, mask, self.item_item_interaction ) attention_scores = tf.expand_dims(attention_scores, -1) # compute h'(T) final_seq_repr = self.layer_2([features_layer_1, attention_scores]) # [B, 1, E] -> [B, E] final_seq_repr = tf.squeeze(final_seq_repr) return final_seq_repr, features_layer_1
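

if __name__ == "__main__":
    # Standalone illustration (added example, not part of the original file)
    # of the masking applied in compute_item_sequence_attention: padded
    # positions are overwritten with the smallest representable value before
    # the softmax, so they receive (numerically) zero attention weight.
    scores_unnormalized = tf.constant([[2.0, 1.0, 0.5, 0.1]])  # [B=1, T=4]
    mask = tf.constant([[1, 1, 1, 0]])                         # last position is padding

    bool_mask = tf.equal(mask, tf.ones_like(mask))
    paddings = tf.ones_like(scores_unnormalized) * scores_unnormalized.dtype.min
    masked_scores = tf.where(bool_mask, scores_unnormalized, paddings)

    attention = tf.nn.softmax(masked_scores)
    print(attention.numpy())  # last weight is ~0, the first three sum to ~1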
TensorFlow/Segmentation/VNet
VNet
README
# V-Net Medical For Tensorflow This repository provides a script and recipe to train the V-Net model to achieve state of the art accuracy, and is tested and maintained by NVIDIA. V-Net model for TensorFlow1 is no longer maintained and will soon become unavailable, please consider [nnU-Net for PyTorch](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Segmentation/nnUNet) as a substitute for your requirements. ## Table of Contents - [Model overview](#model-overview) * [Model architecture](#model-architecture) * [Default configuration](#default-configuration) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [Enabling TF32](#enabling-tf32) - [Setup](#setup) * [Requirements](#requirements) - [Quick Start Guide](#quick-start-guide) - [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Parameters](#parameters) * [Command-line options](#command-line-options) * [Getting the data](#getting-the-data) * [Dataset guidelines](#dataset-guidelines) * [Multi-dataset](#multi-dataset) * [Training process](#training-process) * [Inference process](#inference-process) - [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb) - [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview The V-Net model for Tensorflow, called V-Net_Medical_TF is a convolutional neural network for 3D image segmentation. This repository contains a V-Net implementation and is based on the paper [V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation](https://arxiv.org/pdf/1606.04797), with small alterations to support a new dataset for Hippocampus segmentation. This implementation differs from the original in the following ways: * Convolution filters are 3x3x3 instead of 5x5x5 to increase performance without negatively affecting the accuracy * The number of upsample/downsample levels is reduced to 3 to accommodate the different input size * PReLU activation has been substituted by ReLU to increase performance without negatively affecting the accuracy This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 2.2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. ### Model architecture V-Net was first introduced by Fausto Milletari, Nassir Navab, Seyed-Ahmad Ahmadi in the paper: [V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation](https://arxiv.org/pdf/1606.04797). 
V-Net allows for seamless segmentation of 3D images, with high accuracy and performance, and can be adapted to solve many different segmentation problems. The following figure shows the construction of the standard V-Net model and its different components. V-Net is composed of a contractive and an expanding path, that aims at building a bottleneck in its centermost part through a combination of convolution and downsampling. After this bottleneck, the image is reconstructed through a combination of convolutions and upsampling. Skip connections are added with the goal of helping the backward flow of gradients in order to improve the training. ![V-Net](images/vnet.png) Figure 1. VNet architecture ### Default configuration V-Net consists of a contractive (left-side) and expanding (right-side) path. It repeatedly applies unpadded convolutions followed by max pooling for downsampling. Every step in the expanding path consists of an upsampling of the feature maps and a concatenation with the correspondingly cropped feature map from the contractive path. The following performance optimizations were implemented in this model: * XLA support. * Reduced size of convolutional filters to 3x3x3 * ReLU activation used instead of PReLU * Batchnorm used for training ### Feature support matrix The following features are supported by this model. | **Feature** | **V-Net_Medical_TF** | |:---:|:--------:| | Horovod Multi-GPU (NCCL) | Yes | | Automatic Mixed Precision (AMP) | Yes | The following features were implemented in this model: * Data-parallel multi-GPU training with Horovod. * Mixed precision support with TensorFlow Automatic Mixed Precision (TF-AMP), which enables mixed precision training without any changes to the code-base by performing automatic graph rewrites and loss scaling controlled by an environmental variable. * Tensor Core operations to maximize throughput using NVIDIA Volta GPUs. #### Features * Multi-GPU training with Horovod Our model uses Horovod to implement efficient multi-GPU training with NCCL. For details, see example sources in this repository or see the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage). * Automatic Mixed Precision (AMP) Enables mixed precision training without any changes to the code-base by performing automatic graph rewrites and loss scaling controlled by an environmental variable. ### Mixed precision training Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. 
This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta and Turing GPUs automatically. The TensorFlow framework code makes all necessary model changes internally. In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling. - How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. - How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide. ### Enabling mixed precision In order to enable mixed precision training, the following environment variables must be defined with the correct value before the training starts: ``` TF_ENABLE_AUTO_MIXED_PRECISION=1 ``` Exporting these variables ensures that loss scaling is performed correctly and automatically. By supplying the `--amp` flag to the `main.py` script while training in FP32, the following variables are set to their correct value for mixed precision training inside the `./utils/runner.py` script: ``` if params['use_amp']: LOGGER.log("TF AMP is activated - Experimental Feature") os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1' ``` #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ## Setup The following section lists the requirements in order to start training the V-Net Medical model. ### Requirements This repository contains a `Dockerfile` which extends the TensorFlow NGC container and encapsulates some additional dependencies. 
Aside from these dependencies, ensure you have the following components: * [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) - TensorFlow 20.06-tf1-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-tensorflow) - GPU-based architecture: - [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) - [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/) - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning DGX Documentation: * [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) * [Accessing And Pulling From The NGC container registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry) * [Running Tensorflow](https://docs.nvidia.com/deeplearning/dgx/tensorflow-release-notes/running.html#running) ## Quick Start Guide To train your model using mixed precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the V-Net model on the Hippocampus head and body dataset present on the [medical segmentation decathlon website](http://medicaldecathlon.com/). 1. Clone the repository ``` git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/TensorFlow/Segmentation/V-Net_tf ``` 2. Download and preprocess the dataset The V-Net script `main.py` operates on Hippocampus head and body data from the [medical segmentation decathlon](http://medicaldecathlon.com/). Upon registration, the challenge's data is made available through the following link: * [Medical segmentation decathlon Google Drive](https://drive.google.com/drive/folders/1HqEgzS8BV2c7xYNrZdEAnrHk7osJJ--2) The script `download_dataset.py` is provided for data download. It is possible to select the destination folder when downloading the files by using the `--data_dir` flag. For example: ``` python download_dataset.py --data_dir ./data ``` Once downloaded the data using the `download_dataset.py` script, it can be used to run the training and benchmark scripts described below, by pointing `main.py` to its location using the `--data_dir` flag. **Note:** Masks are only provided for training data. 3. Build the V-Net TensorFlow container After Docker is correctly set up, the V-Net TensorFlow container can be built with: ``` docker build -t vnet_tf . ``` 4. Start an interactive session in the NGC container to run training/inference. Run the previously built Docker container: ``` $ docker run --runtime=nvidia --rm -it --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 -v /path/to/dataset:/data vnet_tf:latest bash ``` **Note:** Ensure to mount your dataset using the `-v` flag to make it available for training inside the NVIDIA Docker container. Data can be downloaded as well from inside the container. 5. 
Start training To run training on all training data for a default configuration (for example 1/4/8 GPUs FP32/TF-AMP), run the `vnet_train.py` script in the `./examples` directory: ``` usage: vnet_train.py [-h] --data_dir DATA_DIR --model_dir MODEL_DIR --gpus {1, 8} --batch_size BATCH_SIZE --epochs EPOCHS OPTIONAL [--amp] ``` For example: ``` python examples/vnet_train.py --data_dir ./data/Task04_Hippocampus --model_dir ./tmp --gpus 8 --batch_size 260 --epochs 50 --amp ``` To run training on 9/10 of the training data and perform evaluation on the remaining 1/10, run the `vnet_train_and_evaluate.py` script in the `./examples` directory: ``` usage: vnet_train_and_evaluate.py [-h] --data_dir DATA_DIR --model_dir MODEL_DIR --gpus {1, 8} --batch_size BATCH_SIZE --epochs EPOCHS OPTIONAL [--amp] ``` This is useful to estimate the convergence point of the training. For example: ``` python examples/vnet_train_and_evaluate.py --data_dir ./data/Task04_Hippocampus --model_dir ./tmp --gpus 1 --batch_size 8 --epochs 260 --amp ``` 6. Start inference/predictions To run inference on a checkpointed model, run the `vnet_predict.py` script in the `./examples` directory: ``` usage: vnet_predict.py [-h] --data_dir DATA_DIR --model_dir MODEL_DIR --batch_size BATCH_SIZE OPTIONAL [--amp] ``` For example: ``` python examples/vnet_predict.py --data_dir ./data/Task04_Hippocampus --model_dir ./tmp --batch_size 4 --amp ``` ## Advanced The following sections provide greater details of the dataset, running training and inference, and the training results. ### Scripts and sample code In the root directory, the most important files are: * `main.py`: Serves as the entry point to the application. * `Dockerfile`: Container with the basic set of dependencies to run V-Net * `requirements.txt`: Set of extra requirements for running V-Net * `download_data.py`: Automatically downloads the dataset for training The `utils/` folder encapsulates the necessary tools to train and perform inference using V-Net. Its main components are: * `runner.py`: Implements the logic for training and inference * `data_loader.py`: Implements the data loading and augmentation * `hooks/profiling_hook.py`: Collects different metrics to be used for benchmarking * `hooks/training_hook.py`: Collects different metrics to be used for training * `hooks/evaluation_hook.py`: Collects different metrics to be used for testing * `var_storage.py`: Helper functions for TF-AMP The model/ folder contains information about the building blocks of V-Net and the way they are assembled. 
Its contents are: * `layers.py`: Defines the different blocks that are used to assemble V-Net * `vnet.py`: Defines the model architecture using the blocks from the `layers.py` script Other folders included in the root directory are: * `dllogger/`: Contains the utilities for logging * `examples/`: Provides examples for training and benchmarking V-Net * `images/`: Contains a model diagram ### Parameters The complete list of the available parameters for the main.py script contains: * `--exec_mode`: Select the execution mode to run the model (default: `train_and_predict`) * `--data_normalization`: Select the type of data normalization (default: `zscore`) * `--activation`: Select the activation to be used in the network (default: `prelu`) * `--resize_interpolator`: Select the interpolator for image resizing (default: `nearest_neighbor`) * `--loss`: Loss function to be utilized for training (default: `dice`) * `--normalization_layer`: Type of normalization layer to be used in the model (default: `batchnorm`) * `--pooling`: Type of pooling layer to be used in the model (default: `conv_pool`) * `--upsampling`: Type of upsampling layer to be used in the model (default: `transposed_conv`) * `--seed`: Random seed value (default: `0`) * `--input_shape`: Target resize dimension for input samples (default: `[32 32 32]`) * `--upscale_blocks`: Number of upscale blocks with the depth of their residual component (default: `[3 3 3]`) * `--downscale_blocks`: Number of downscale blocks with the depth of their residual component (default: `[3 3]`) * `--model_dir`: Set the output directory for information related to the model (default: `result/`) * `--convolution_size`: Size of the convolutional kernel filters (default: `3`) * `--batch_size`: Number of samples processed per execution step * `--log_every`: Log every this number of epochs (default: `100`) * `--warmup_steps`: Initial number of steps that will not be benchmarked as the model starts running (default: `200`) * `--train_epochs`: Number of times that training will go through the entire dataset * `--optimizer`: Optimizer to be used during training (default: `rmsprop`) * `--base_lr`: Model’s learning rate (default: `0.01`) * `--momentum`: Momentum coefficient for model’s optimizer (default: `0.99`) * `--train_split`: Proportion of the dataset that will become the training set (default: `0.9`) * `--split_seed`: Random seed for the splitting of the dataset between training and validation * `--model_dir`: Path where checkpoints and information related to the model will be stored * `--data_dir`: Path to the dataset * `--augment`: Enable data augmentation (default: `False`) * `--benchmark`: Enable performance benchmarking (default: `False`) * `--amp`: Enable automatic mixed precision (default: `False`) * `--xla`: Enable xla (default: `False`) ### Command-line options To see the full list of available options and their descriptions, use the `-h` or `--help` command line option, for example: ``` python main.py usage: main.py [-h] --exec_mode {train,predict,train_and_predict,train_and_evaluate} [--data_normalization {zscore}] [--activation {relu}] [--resize_interpolator {linear}] [--loss {dice}] [--normalization_layer {batchnorm}] [--pooling {conv_pool}] [--upsampling {transposed_conv}] [--seed SEED] [--input_shape INPUT_SHAPE [INPUT_SHAPE ...]] [--upscale_blocks UPSCALE_BLOCKS [UPSCALE_BLOCKS ...]] [--downscale_blocks DOWNSCALE_BLOCKS [DOWNSCALE_BLOCKS ...]] [--convolution_size {3}] --batch_size BATCH_SIZE [--log_every LOG_EVERY] [--warmup_steps WARMUP_STEPS] 
[--train_epochs TRAIN_EPOCHS] [--optimizer {rmsprop}] [--base_lr BASE_LR] [--momentum MOMENTUM] [--train_split TRAIN_SPLIT] [--split_seed SPLIT_SEED] --model_dir MODEL_DIR --data_dir DATA_DIR [--benchmark] [--amp] [--xla] [--augment] ``` ### Getting the data The V-Net model was trained on the Hippocampus dataset from [medical segmentation decathlon](http://medicaldecathlon.com/). Test images provided by the organization were used to produce the resulting masks for submission. The objective is to produce a set of masks that segment the data as accurately as possible. Medical segmentation decathlon (MSD) datasets are conformed by the following elements: * `dataset.json` contains a high level description of the contents of the dataset * `ImagesTr` contains the training images as Nifti files * `LabelsTr` contains the training labels as Nifti files * `ImagesTs` contains the test images as Nifti files #### Dataset guidelines The process of loading, normalizing and augmenting the data contained in the dataset can be found in the `data_loader.py` script. Initially, data is loaded from a `Nifti` file and converted to NumPy arrays with the use of SimpleItk, with target dimensions specified through `--input_shape`. These NumPy arrays are fed to the model through `tf.data.Dataset.from_tensor_slices()`, in order to achieve high performance. Intensities on the volumes are then normalized using the method specified in `--data_normalization`, whereas labels are one-hot encoded for their later use. If augmentation is enabled, the following set of augmentation techniques are applied: * Random horizontal flipping * Random vertical flipping ### Training process #### Optimizer The model trains for 80 epochs with the following hyperparameters: * RMSProp optimizer with momentum = 0.0 * Base learning rate = 0.0001 ### Inference process To run inference on a checkpointed model, run the script below, although it requires a pre-trained model checkpoint and tokenized input. ``` python examples/vnet_predict.py --data_dir ./data/Task04_Hippocampus --model_dir ./tmp --batch_size {N} [--amp] ``` This script should produce the prediction results over a set of masks which will be located in `./tmp/eval`. ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Benchmarking Starting from CuDNN 7.6.2, enhanced support for 3D convolutions in mixed precision has been introduced to our containers. This enhanced support accelerates even further both training and inference, while maintaining the reduction of the model's memory footprint characteristic of mixed precision training. ![Pascal_vs_Volta](images/1gpu_p_vs_v.png) ![Pascal_vs_Volta](images/8gpu_p_vs_v.png) In the two figures above, it's displayed the difference in throughput for P100 and V100 GPUs when training V-Net in a single and multi-GPU setup. We do this for different batch sizes. For both single and multi-GPU, training V-Net using mixed precision in Volta GPUs (V100) is at least 2x faster than in Pascal (P100). ![Pascal_vs_Volta](images/infer_p_vs_v.png) The figure above displays the difference in throughput for P100 and V100 GPUs when performing inference using V-Net. We do this for different batch sizes. 
In general, V-Net inference using mixed precision on Volta GPUs (V100) is at least 2x faster than on Pascal (P100).

The following section shows how to run benchmarks measuring the model performance in training and inference modes.

#### Training performance benchmark

To benchmark training, run the script `vnet_benchmark.py` in the `./examples` directory.

```
usage: vnet_benchmark.py [-h] --data_dir DATA_DIR --model_dir MODEL_DIR --mode train --gpus {1, 8} --batch_size BATCH_SIZE OPTIONAL [--amp]
```

This script will by default run 200 warm-up iterations and benchmark the performance during training in the next 200 iterations.

#### Inference performance benchmark

To benchmark inference, run the script `vnet_benchmark.py` in the `./examples` directory.

```
usage: vnet_benchmark.py [-h] --data_dir DATA_DIR --model_dir MODEL_DIR --mode predict --gpus {1, 8} --batch_size BATCH_SIZE OPTIONAL [--amp]
```

This script will by default run 200 warm-up iterations and benchmark the performance during inference in the next 200 iterations.

### Results

The following sections provide details on how we achieved our performance and accuracy in training and inference.

#### Training accuracy results

The dataset is divided into training data (samples with ground truth) and test data (unlabelled). In order to obtain instant feedback on the quality of the model, the test data is put aside, and training and evaluation are performed on the original training set. 90% of the training data is used for training, while the remaining 10% is used for validation. This validation data remains unseen during training and is used exclusively to calculate the final accuracy of the model.

##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)

Our results were obtained by running the `./examples/vnet_train_and_evaluate.py` script in the `nvcr.io/nvidia/tensorflow:20.06-tf1-py3` NGC container on NVIDIA DGX-1 with 8x V100 16GB GPUs.

To train until convergence in FP32 using 1 GPU, run:
```
python examples/vnet_train_and_evaluate.py --gpus 1 --batch_size 2 --base_lr 0.0001 --epochs 80 --data_dir ./data/Task04_Hippocampus/ --model_dir /tmp
```

To train until convergence in FP32 using 8 GPUs, run:
```
python examples/vnet_train_and_evaluate.py --gpus 8 --batch_size 2 --base_lr 0.0001 --epochs 320 --data_dir ./data/Task04_Hippocampus/ --model_dir /tmp
```

To train until convergence in FP16 using 1 GPU, run:
```
python examples/vnet_train_and_evaluate.py --gpus 1 --batch_size 2 --base_lr 0.0001 --epochs 80 --data_dir ./data/Task04_Hippocampus/ --model_dir /tmp --amp
```

To train until convergence in FP16 using 8 GPUs, run:
```
python examples/vnet_train_and_evaluate.py --gpus 8 --batch_size 2 --base_lr 0.0001 --epochs 320 --data_dir ./data/Task04_Hippocampus/ --model_dir /tmp --amp
```

| GPUs | Batch size / GPU | Anterior dice - FP32 | Anterior dice - mixed precision | Time to train - FP32 | Time to train - mixed precision | Time to train speedup (FP32 to mixed precision) |
|------|------------------|----------------------|---------------------------------|----------------------|---------------------------------|-------------------------------------------------|
| 1    | 2                | 0.8537               | 0.8533                          | 11 min               | 11 min                          | 1.0                                             |
| 8    | 2                | 0.8409               | 0.8398                          | 2 min                | 2 min                           | 1.0                                             |

To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).

#### Training performance results

##### Training performance: NVIDIA DGX-1 (8x V100 16GB)

Our results were obtained by running the `./examples/vnet_benchmark.py` script in the `nvcr.io/nvidia/tensorflow:20.06-tf1-py3` NGC container on NVIDIA DGX-1 with 8x V100 16GB GPUs.
Performance numbers (in images per second) were averaged over 200 iterations. For example: ``` python examples/vnet_benchmark.py --data_dir ./data/Task04_Hippocampus --model_dir /tmp --mode train --gpus {1,8} --batch_size {8,16,32} [--amp] ``` | GPUs | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision | |---|----|---------|---------|------|------|------| | 1 | 2 | 117.82 | 114.11 | 0.97 | N/A | N/A | | 1 | 8 | 277.46 | 368.93 | 1.33 | N/A | N/A | | 1 | 16 | 339.56 | 427.02 | 1.26 | N/A | N/A | | 1 | 32 | 444.98 | 639.03 | 1.44 | N/A | N/A | | 8 | 2 | 584.23 | 497.05 | 0.85 | 4.96 | 4.36 | | 8 | 8 | 1783.44 | 1851.75 | 1.04 | 6.43 | 5.02 | | 8 | 16 | 2342.51 | 2821.20 | 1.20 | 6.90 | 6.61 | | 8 | 32 | 3189.86 | 4282.41 | 1.34 | 7.17 | 6.70 | To achieve these same results, follow the [Quick start guide](#quick-start-guide) outlined above. #### Inference performance results ##### Inference performance: NVIDIA DGX-1 (1x V100 16GB) Our results were obtained by running the `./examples/vnet_benchmark.py` scripts in the `nvcr.io/nvidia/tensorflow:20.06-tf1-py3` NGC container on NVIDIA DGX-1 with 1x V100 16GB GPUs. For example: ``` python examples/vnet_benchmark.py --data_dir ./data/Task04_Hippocampus --model_dir /tmp --mode predict --gpus 1 --batch_size {8, 16, 32} [--amp] ``` FP16 | Batch size | Sequence length | Throughput Avg | Latency Avg | Latency 90% |Latency 95% |Latency 99% | |----|------------|---------|-------|-------|-------|-------| | 8 | 32x32x32x1 | 1428.89 | 6.59 | 8.25 | 8.57 | 9.19 | | 16 | 32x32x32x1 | 2010.71 | 10.23 | 14.04 | 14.77 | 16.20 | | 32 | 32x32x32x1 | 3053.85 | 16.36 | 26.08 | 27.94 | 31.58 | FP32 | Batch size | Sequence length | Throughput Avg | Latency Avg | Latency 90% | Latency 95% | Latency 99% | |----|------------|---------|-------|-------|-------|-------| | 8 | 32x32x32x1 | 1009.75 | 8.89 | 10.53 | 10.84 | 11.45 | | 16 | 32x32x32x1 | 1262.54 | 14.92 | 18.71 | 19.43 | 20.85 | | 32 | 32x32x32x1 | 1496.08 | 27.32 | 37.27 | 39.17 | 42.90 | To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). ## Release notes V-Net model for TensorFlow will no longer be available after 04/30/2022, please consider [UNet for 3D image segmentation in TensorFlow](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_3D_Medical) or [nnU-Net for PyTorch](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Segmentation/nnUNet) as a substitute for your requirements. ### Changelog April 2021 * Ceased maintenance of the model June 2020 * Updated training and inference accuracy * Updated training and inference performance November 2019 * Initial release ### Known issues There are no known issues in this release.
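As a small illustration of the flip augmentation mentioned in the Dataset guidelines above, the following NumPy sketch applies random horizontal and vertical flips jointly to a volume and its label. It is a simplified stand-in for the repository's actual `data_loader.py` implementation, with shapes chosen only for the example.

```
import numpy as np

def random_flip(volume, label, rng=np.random):
    """Randomly flip a (D, H, W) volume and its label along W and H."""
    if rng.random() > 0.5:  # horizontal flip
        volume, label = np.flip(volume, axis=2), np.flip(label, axis=2)
    if rng.random() > 0.5:  # vertical flip
        volume, label = np.flip(volume, axis=1), np.flip(label, axis=1)
    return np.ascontiguousarray(volume), np.ascontiguousarray(label)

volume = np.random.rand(32, 32, 32).astype(np.float32)
label = np.zeros((32, 32, 32), dtype=np.uint8)
aug_volume, aug_label = random_flip(volume, label)
print(aug_volume.shape, aug_label.shape)
```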
PyTorch/Classification/GPUNet/triton/08ms-D/runner
runner
config_NVIDIA-DGX-1-(1x-V100-32GB)
batching: dynamic checkpoints: - name: 0.8ms-D url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_p1_pyt_ckpt/versions/21.12.0_amp/zip configurations: - checkpoint: 0.8ms-D parameters: backend_accelerator: trt checkpoint: 0.8ms-D device_kind: gpu export_format: onnx export_precision: fp16 format: onnx max_batch_size: 64 number_of_model_instances: 2 precision: fp16 tensorrt_capture_cuda_graph: 0 torch_jit: none container_version: '21.12' datasets: - name: imagenet datasets_dir: datasets ensemble_model_name: null framework: PyTorch measurement_steps_offline: 8 measurement_steps_online: 32 model_name: GPUnet performance_tool: model_analyzer triton_container_image: nvcr.io/nvidia/tritonserver:21.12-py3 triton_custom_operations: null triton_dockerfile: null triton_load_model_method: explicit
PyTorch/SpeechRecognition/Jasper/notebooks
notebooks
JasperTRT
#!/usr/bin/env python # coding: utf-8 # In[ ]: # Copyright 2019 NVIDIA Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # <img src=http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png style="width: 90px; float: right;"> # # # Jasper Inference For TensorRT 6 # This Jupyter notebook provides scripts to perform high-performance inference using NVIDIA TensorRT 6 with dynamic shapes. # Jasper is a neural acoustic model for speech recognition. Its network architecture is designed to facilitate fast GPU inference. # NVIDIA TensorRT is a platform for high-performance deep learning inference. It includes a deep learning inference optimizer and runtime that delivers low latency and high-throughput for deep learning inference applications. # After optimizing the compute-intensive acoustic model with NVIDIA TensorRT, inference throughput increased by up to 1.8x over native PyTorch. # ## 1. Overview # # The Jasper model is an end-to-end neural acoustic model for automatic speech recognition (ASR) that provides near state-of-the-art results on LibriSpeech among end-to-end ASR models without any external data. The Jasper architecture of convolutional layers was designed to facilitate fast GPU inference, by allowing whole sub-blocks to be fused into a single GPU kernel. This is important for meeting strict real-time requirements of ASR systems in deployment.The results of the acoustic model are combined with the results of external language models to get the top-ranked word sequences corresponding to a given audio segment. This post-processing step is called decoding. # # The original paper is Jasper: An End-to-End Convolutional Neural Acoustic Model https://arxiv.org/pdf/1904.03288.pdf. # # ### 1.1 Model architecture # By default the model configuration is Jasper 10x5 with dense residuals. A Jasper BxR model has B blocks, each consisting of R repeating sub-blocks. # Each sub-block applies the following operations in sequence: 1D-Convolution, Batch Normalization, ReLU activation, and Dropout. # In the original paper Jasper is trained with masked convolutions, which masks out the padded part of an input sequence in a batch before the 1D-Convolution. # For inference masking is not used. The reason for this is that in inference, the original mask operation does not achieve better accuracy than without the mask operation on the test and development dataset. However, no masking achieves better inference performance especially after TensorRT optimization. # More information on the model architecture can be found in the [root folder](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechRecognition/Jasper) # # ### 1.2 TensorRT Inference pipeline # The Jasper inference pipeline consists of 3 components: data preprocessor, acoustic model and greedy decoder. The acoustic model is the most compute intensive, taking more than 90% of the entire end-to-end pipeline. 
The acoustic model is the only component with learnable parameters and also what differentiates Jasper from the competition. So, we focus on the acoustic model for the most part. # For the non-TRT Jasper inference pipeline, all 3 components are implemented and run with native PyTorch. For the TensorRT inference pipeline, we show the speedup of running the acoustic model with TensorRT, while preprocessing and decoding are reused from the native PyTorch pipeline. # To run a model with TensorRT, we first construct the model in PyTorch, which is then exported into an ONNX file. Finally, a TensorRT engine is constructed from the ONNX file, serialized to TRT plan file, and also launched to do inference. # Note that TensorRT engine is being runtime optimized before serialization. TRT tries a vast set of options to find the strategy that performs best on user’s GPU - so it takes a few minutes. After the TRT plan file is created, it can be reused. # ### 1.3 Learning objectives # # This notebook demonstrates: # - Speed up Jasper Inference with TensorRT # - The use/download of fine-tuned NVIDIA Jasper models # - Use of Mixed Precision for Inference # ## 2. Requirements # # Please refer to Jasper TensorRT README.md # ## 3. Jasper Inference # # ### 3.1 Prepare Working Directory # In[ ]: import os if not 'workbookDir' in globals(): workbookDir = os.getcwd() + "/../" print('workbookDir: ' + workbookDir) os.chdir(workbookDir) # ### 3.2 Start a detached session in the NGC container # In[ ]: get_ipython().system('nvidia-docker run -it -d --rm --name "JasperTRT" --runtime=nvidia --shm-size=4g --ulimit memlock=-1 --ulimit stack=67108864 -v $PWD/data:/datasets -v $PWD/checkpoint:/checkpoints/ -v $PWD/result:/results/ -v $PWD:/workspace/jasper/ jasper:trt6 bash') # You can also specify single or multiple GPUs to run the container by adding "NV_GPU" before the "nvidia-docker run" command. For example, to specify GPU ID 2 to run the container, add "NV_GPU=2" before the "nvidia-docker run" command. You can use the command "nvidia-smi" to check your GPU IDs and utilization. # In[ ]: #check the container that you just started get_ipython().system('docker ps -a') # ### 3.3 Download and preprocess the dataset. # You will not need to download the dataset if you directly go to Section 5 to play with audio examples. # # If LibriSpeech http://www.openslr.org/12 has already been downloaded and preprocessed, no further steps in this subsection need to be taken. # If LibriSpeech has not been downloaded already, note that only a subset of LibriSpeech is typically used for inference (dev-* and test-*). LibriSpeech contains 1000 hours of 16kHz read English speech derived from public domain audiobooks from LibriVox project and has been carefully segmented and aligned. For more information, see paper [LIBRISPEECH: AN ASR CORPUS BASED ON PUBLIC DOMAIN AUDIO BOOKS paper](http://www.danielpovey.com/files/2015_icassp_librispeech.pdf). # To acquire the inference subset of LibriSpeech run (does not require GPU): # In[ ]: get_ipython().system('nvidia-docker exec -it JasperTRT bash trt/scripts/download_inference_librispeech.sh') # Once the data download is complete, the following folders should exist: # * /datasets/LibriSpeech/ # * dev-clean/ # * dev-other/ # * test-clean/ # * test-other/ # # Since /datasets/ is mounted to <DATA_DIR> on the host, once the dataset is downloaded it is accessible from outside of the container at <DATA_DIR>/LibriSpeech. 
# # Next, the data can be preprocessed with the following command: # In[ ]: get_ipython().system('nvidia-docker exec -it JasperTRT bash trt/scripts/preprocess_inference_librispeech.sh') # Once the data is preprocessed, the following additional files should now exist: # # * /datasets/LibriSpeech/ # * librispeech-dev-clean-wav.json # * librispeech-dev-other-wav.json # * librispeech-test-clean-wav.json # * librispeech-test-other-wav.json # * dev-clean/ # * dev-other/ # * test-clean/ # * test-other/ # ### 3.4 Start TensorRT inference prediction # # Inside the container, use the following script to run inference with TensorRT. # You will need to set parameters such as: # # * `CHECKPOINT`: model checkpoint path # * `TRT_PRECISION`: "fp32" or "fp16". Defines which precision kernels will be used for the TensorRT engine (default: "fp32") # * `PYTORCH_PRECISION`: "fp32" or "fp16". Defines which precision will be used for inference in PyTorch (default: "fp32") # * `TRT_PREDICTION_PATH`: file to store inference prediction results generated with TensorRT # * `PYT_PREDICTION_PATH`: file to store inference prediction results generated with native PyTorch # * `DATASET`: LibriSpeech dataset (default: dev-clean) # * `NUM_STEPS`: number of inference steps (default: -1) # * `BATCH_SIZE`: mini-batch size (default: 1) # * `NUM_FRAMES`: cuts/pads all pre-processed feature tensors to this length. 100 frames ~ 1 second of audio (default: 3600) # # In[ ]: get_ipython().system('nvidia-docker exec -it -e CHECKPOINT=/checkpoints/jasper_fp16.pt -e TRT_PREDICTION_PATH=/results/result.txt JasperTRT bash trt/scripts/trt_inference.sh') # ### 3.5 Start TensorRT inference benchmark # # Run the following command to run the inference benchmark with TensorRT inside the container. # # You will need to set parameters such as: # # * `CHECKPOINT`: model checkpoint path # * `NUM_STEPS`: number of inference steps. If -1, runs inference on the entire dataset (default: -1) # * `NUM_FRAMES`: cuts/pads all pre-processed feature tensors to this length. 100 frames ~ 1 second of audio (default: 512) # * `BATCH_SIZE`: data batch size (default: 64) # * `TRT_PRECISION`: "fp32" or "fp16". Defines which precision kernels will be used for the TensorRT engine (default: "fp32") # * `PYTORCH_PRECISION`: "fp32" or "fp16". Defines which precision will be used for inference in PyTorch (default: "fp32") # * `CSV_PATH`: file to store CSV results (default: "/results/res.csv") # In[ ]: get_ipython().system('nvidia-docker exec -it -e CHECKPOINT=/checkpoints/jasper_fp16.pt -e TRT_PREDICTION_PATH=/results/benchmark.txt JasperTRT bash trt/scripts/trt_inference_benchmark.sh') # ## 4. Automatic Mixed Precision # # Mixed precision is the combined use of different numerical precisions in a computational method. Mixed precision training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of Tensor Cores in the Volta and Turing architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. # # Using mixed precision training requires two steps: # # * Porting the model to use the FP16 data type where appropriate. # * Adding loss scaling to preserve small gradient values.
# # The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in CUDA 8 in the NVIDIA Deep Learning SDK. # For information about: # # How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation. # # Techniques used for mixed precision training, see the blog [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/). # # APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/). # # To enable mixed precision, set the variables `TRT_PRECISION=fp16` and `PYTORCH_PRECISION=fp16` when running the inference. To run the TensorRT inference benchmark using automatic mixed precision: # In[ ]: get_ipython().system('nvidia-docker exec -it -e CHECKPOINT=/checkpoints/jasper_fp16.pt -e TRT_PREDICTION_PATH=/results/benchmark.txt -e TRT_PRECISION=fp16 -e PYTORCH_PRECISION=fp16 -e CSV_PATH=/results/res_fp16.csv JasperTRT bash trt/scripts/trt_inference_benchmark.sh') # From the performance metrics (pyt_infer) in the res.csv (fp32) and res_fp16.csv (automatic mixed precision) files, you can see that automatic mixed precision speeds up inference considerably compared to fp32. # ## 5. Play with audio examples # # You can perform inference using pre-trained checkpoints, which takes an audio file (in .wav format) as input and produces the corresponding text file. You can customize the content of the text file by changing the input audio. For example, there are several example input files in the "notebooks" directory, and we can listen to example1.wav: # In[ ]: import IPython.display as ipd ipd.Audio('notebooks/example1.wav', rate=22050) # You can run inference using the trt/perf.py script: # * the checkpoint is passed as the `--ckpt_path` argument # * `--model_toml` specifies the path to the network configuration file (see examples in the "configs" directory) # * `--make_onnx` exports to an ONNX file at the given path if set # * `--engine_path` saves the engine file (*.plan) # # To create a new engine file (jasper.plan) for TensorRT and run it using fp32 (building the engine for the first time can take several minutes): # In[ ]: get_ipython().system('nvidia-docker exec -it JasperTRT python trt/perf.py --ckpt_path /checkpoints/jasper_fp16.pt --wav=notebooks/example1.wav --model_toml=configs/jasper10x5dr_nomask.toml --make_onnx --onnx_path jasper.onnx --engine_path jasper.plan') # If you already have the engine file (jasper.plan), to run the existing TensorRT engine using fp32: # In[ ]: get_ipython().system('nvidia-docker exec -it JasperTRT python trt/perf.py --wav=notebooks/example1.wav --model_toml=configs/jasper10x5dr_nomask.toml --use_existing_engine --engine_path jasper.plan') # To run inference on the input audio file using automatic mixed precision, add the argument `--trt_fp16`.
With automatic mixed precision, inference time can be reduced compared to fp32 (building the engine for the first time can take several minutes): # In[ ]: get_ipython().system('nvidia-docker exec -it JasperTRT python trt/perf.py --ckpt_path /checkpoints/jasper_fp16.pt --wav=notebooks/example1.wav --model_toml=configs/jasper10x5dr_nomask.toml --make_onnx --onnx_path jasper.onnx --engine_path jasper_fp16.plan --trt_fp16') # If you already have the engine file (jasper_fp16.plan), to run the existing TensorRT engine using automatic mixed precision: # In[ ]: get_ipython().system('nvidia-docker exec -it JasperTRT python trt/perf.py --wav=notebooks/example1.wav --model_toml=configs/jasper10x5dr_nomask.toml --use_existing_engine --engine_path jasper_fp16.plan --trt_fp16') # You can play with the other examples in the "notebooks" directory. You can also input your own audio files and generate the output text files in this way. # # For more information about TensorRT and building an engine file in Python, please see: https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#python_topics # In[ ]: # stop your container when you are done get_ipython().system('docker stop JasperTRT') # ## 6. What's next # Now that you are familiar with running Jasper inference with TensorRT and automatic mixed precision, you may want to try your own audio samples or train the model on your own dataset. For information on training, please see our GitHub repo: https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechRecognition/Jasper
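# As a reference, the cell below sketches the PyTorch-to-ONNX export step described in section 1.2, using a stand-in `torch.nn.Module`. It is only a minimal illustration of `torch.onnx.export` with dynamic shapes, not the exact code used by `trt/perf.py`; the toy module, tensor shapes and axis names are placeholders chosen for the example.

# In[ ]:

# Minimal sketch: export an acoustic-model-like module to ONNX with dynamic
# batch and time dimensions (placeholder module and shapes, not Jasper itself).
import torch

class ToyAcousticModel(torch.nn.Module):
    def __init__(self, n_feat=64, n_classes=29):
        super().__init__()
        self.conv = torch.nn.Conv1d(n_feat, n_classes, kernel_size=11, padding=5)

    def forward(self, feats):
        # feats: [batch, n_feat, time] -> per-frame log-probabilities
        return torch.nn.functional.log_softmax(self.conv(feats), dim=1)

model = ToyAcousticModel().eval()
dummy = torch.randn(1, 64, 3600)  # [batch, features, frames]
torch.onnx.export(
    model, dummy, "toy_acoustic_model.onnx",
    input_names=["feats"], output_names=["log_probs"],
    dynamic_axes={"feats": {0: "batch", 2: "time"},
                  "log_probs": {0: "batch", 2: "time"}},
    opset_version=11,
)

# The resulting ONNX file can then be parsed by TensorRT to build an engine, as the trt/perf.py commands above do for the real Jasper model.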
Tools/PyTorch/TimeSeriesPredictionPlatform/examples
examples
hp_search
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # More info here: https://hydra.cc/docs/plugins/optuna_sweeper/ python launch_training.py \ -m \ 'model.config.n_head=choice(1,2,4)' \ 'trainer.optimizer.lr=tag(log, interval(1e-5, 1e-2))' \ model=tft \ dataset=electricity \ trainer/criterion=quantile \ trainer.config.batch_size=1024 \ trainer.config.num_epochs=2 \ trainer.config.log_interval=-1 \ "evaluator.config.metrics=[P50, P90, MAE, MSE]" \ +optuna_objectives=[P50] \ hydra/sweeper=optuna \ hydra.sweeper.n_trials=3 \ hydra.sweeper.n_jobs=1 \ hydra.sweeper.storage=sqlite:////workspace/hp_search_multiobjective.db
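For orientation, the snippet below is a minimal, self-contained sketch of what the Hydra Optuna sweeper drives under the hood: an Optuna study that samples the same two hyperparameters (`model.config.n_head` and `trainer.optimizer.lr`) and minimizes a single objective. The `train_and_score` function is a placeholder for the real training run launched by `launch_training.py`.

import optuna

def train_and_score(n_head: int, lr: float) -> float:
    """Placeholder for launching a training run and returning the P50 metric."""
    # In the real sweep, Hydra composes the config and launch_training.py
    # trains TFT on the electricity dataset; here we just fake a score.
    return (n_head - 2) ** 2 + abs(lr - 1e-3)

def objective(trial: optuna.Trial) -> float:
    n_head = trial.suggest_categorical("model.config.n_head", [1, 2, 4])
    lr = trial.suggest_float("trainer.optimizer.lr", 1e-5, 1e-2, log=True)
    return train_and_score(n_head, lr)

study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=3)
print(study.best_params)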
TensorFlow2/LanguageModeling/BERT
BERT
tokenization
# coding=utf-8 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tokenization classes implementation. The file is forked from: https://github.com/google-research/bert/blob/master/tokenization.py. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re import unicodedata import six import tensorflow as tf import sentencepiece as spm SPIECE_UNDERLINE = "▁" def validate_case_matches_checkpoint(do_lower_case, init_checkpoint): """Checks whether the casing config is consistent with the checkpoint name.""" # The casing has to be passed in by the user and there is no explicit check # as to whether it matches the checkpoint. The casing information probably # should have been stored in the bert_config.json file, but it's not, so # we have to heuristically detect it to validate. if not init_checkpoint: return m = re.match("^.*?([A-Za-z0-9_-]+)/bert_model.ckpt", init_checkpoint) if m is None: return model_name = m.group(1) lower_models = [ "uncased_L-24_H-1024_A-16", "uncased_L-12_H-768_A-12", "multilingual_L-12_H-768_A-12", "chinese_L-12_H-768_A-12" ] cased_models = [ "cased_L-12_H-768_A-12", "cased_L-24_H-1024_A-16", "multi_cased_L-12_H-768_A-12" ] is_bad_config = False if model_name in lower_models and not do_lower_case: is_bad_config = True actual_flag = "False" case_name = "lowercased" opposite_flag = "True" if model_name in cased_models and do_lower_case: is_bad_config = True actual_flag = "True" case_name = "cased" opposite_flag = "False" if is_bad_config: raise ValueError( "You passed in `--do_lower_case=%s` with `--init_checkpoint=%s`. " "However, `%s` seems to be a %s model, so you " "should pass in `--do_lower_case=%s` so that the fine-tuning matches " "how the model was pre-training. If this error is wrong, please " "just comment out this check." % (actual_flag, init_checkpoint, model_name, case_name, opposite_flag)) def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def printable_text(text): """Returns text encoded in a way suitable for print or `tf.logging`.""" # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. 
if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode("utf-8") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() index = 0 with tf.io.gfile.GFile(vocab_file, "r") as reader: while True: token = convert_to_unicode(reader.readline()) if not token: break token = token.strip() vocab[token] = index index += 1 return vocab def convert_by_vocab(vocab, items): """Converts a sequence of [tokens|ids] using the vocab.""" output = [] for item in items: output.append(vocab[item]) return output def convert_tokens_to_ids(vocab, tokens): return convert_by_vocab(vocab, tokens) def convert_ids_to_tokens(inv_vocab, ids): return convert_by_vocab(inv_vocab, ids) def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class FullTokenizer(object): """Runs end-to-end tokenziation.""" def __init__(self, vocab_file, do_lower_case=True): self.vocab = load_vocab(vocab_file) self.inv_vocab = {v: k for k, v in self.vocab.items()} self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) def tokenize(self, text): split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens def convert_tokens_to_ids(self, tokens): return convert_by_vocab(self.vocab, tokens) def convert_ids_to_tokens(self, ids): return convert_by_vocab(self.inv_vocab, ids) class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True): """Constructs a BasicTokenizer. Args: do_lower_case: Whether to lower case the input. """ self.do_lower_case = do_lower_case def tokenize(self, text): """Tokenizes a piece of text.""" text = convert_to_unicode(text) text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). 
text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ((cp >= 0x4E00 and cp <= 0x9FFF) or # (cp >= 0x3400 and cp <= 0x4DBF) or # (cp >= 0x20000 and cp <= 0x2A6DF) or # (cp >= 0x2A700 and cp <= 0x2B73F) or # (cp >= 0x2B740 and cp <= 0x2B81F) or # (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or # (cp >= 0x2F800 and cp <= 0x2FA1F)): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenziation.""" def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer. Returns: A list of wordpiece tokens. 
""" text = convert_to_unicode(text) output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically control characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat in ("Cc", "Cf"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False def preprocess_text(inputs, remove_space=True, lower=False): """Preprocesses data by removing extra space and normalize data. This method is used together with sentence piece tokenizer and is forked from: https://github.com/google-research/google-research/blob/master/albert/tokenization.py Args: inputs: The input text. remove_space: Whether to remove the extra space. lower: Whether to lowercase the text. Returns: The preprocessed text. """ outputs = inputs if remove_space: outputs = " ".join(inputs.strip().split()) if six.PY2 and isinstance(outputs, str): try: outputs = six.ensure_text(outputs, "utf-8") except UnicodeDecodeError: outputs = six.ensure_text(outputs, "latin-1") outputs = unicodedata.normalize("NFKD", outputs) outputs = "".join([c for c in outputs if not unicodedata.combining(c)]) if lower: outputs = outputs.lower() return outputs def encode_pieces(sp_model, text, sample=False): """Segements text into pieces. This method is used together with sentence piece tokenizer and is forked from: https://github.com/google-research/google-research/blob/master/albert/tokenization.py Args: sp_model: A spm.SentencePieceProcessor object. text: The input text to be segemented. sample: Whether to randomly sample a segmentation output or return a deterministic one. Returns: A list of token pieces. 
""" if six.PY2 and isinstance(text, six.text_type): text = six.ensure_binary(text, "utf-8") if not sample: pieces = sp_model.EncodeAsPieces(text) else: pieces = sp_model.SampleEncodeAsPieces(text, 64, 0.1) new_pieces = [] for piece in pieces: piece = printable_text(piece) if len(piece) > 1 and piece[-1] == "," and piece[-2].isdigit(): cur_pieces = sp_model.EncodeAsPieces(piece[:-1].replace( SPIECE_UNDERLINE, "")) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: cur_pieces = cur_pieces[1:] else: cur_pieces[0] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(cur_pieces) else: new_pieces.append(piece) return new_pieces def encode_ids(sp_model, text, sample=False): """Segments text and return token ids. This method is used together with sentence piece tokenizer and is forked from: https://github.com/google-research/google-research/blob/master/albert/tokenization.py Args: sp_model: A spm.SentencePieceProcessor object. text: The input text to be segemented. sample: Whether to randomly sample a segmentation output or return a deterministic one. Returns: A list of token ids. """ pieces = encode_pieces(sp_model, text, sample=sample) ids = [sp_model.PieceToId(piece) for piece in pieces] return ids class FullSentencePieceTokenizer(object): """Runs end-to-end sentence piece tokenization. The interface of this class is intended to keep the same as above `FullTokenizer` class for easier usage. """ def __init__(self, sp_model_file): """Inits FullSentencePieceTokenizer. Args: sp_model_file: The path to the sentence piece model file. """ self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(sp_model_file) self.vocab = { self.sp_model.IdToPiece(i): i for i in six.moves.range(self.sp_model.GetPieceSize()) } def tokenize(self, text): """Tokenizes text into pieces.""" return encode_pieces(self.sp_model, text) def convert_tokens_to_ids(self, tokens): """Converts a list of tokens to a list of ids.""" return [self.sp_model.PieceToId(printable_text(token)) for token in tokens] def convert_ids_to_tokens(self, ids): """Converts a list of ids ot a list of tokens.""" return [self.sp_model.IdToPiece(id_) for id_ in ids]
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/training/AMP
AMP
convergence_1xA100-80G
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python3 main.py \ --cfg config/efficientnet_v2/s_cfg.py \ --mode train_and_eval \ --use_amp \ --use_xla \ --model_dir ./output/ \ --data_dir /data/ \ --log_steps 500 \ --save_checkpoint_freq 10 \ --n_stages 4 \ --max_epochs 350 \ --train_batch_size 460 \ --train_img_size 300 \ --base_img_size 128 \ --lr_decay cosine \ --lr_init 0.005 \ --weight_decay .000005 \ --opt_epsilon 0.001 \ --moving_average_decay 0.9999 \ --eval_img_size 384 \ --eval_batch_size 100 \ --augmenter_name randaugment \ --raug_num_layers 2 \ --raug_magnitude 15 \ --cutmix_alpha 0 \ --mixup_alpha 0 \ --defer_img_mixing
TensorFlow2/Detection/Efficientdet/scripts/D0
D0
training-benchmark-AMP-1xA100-80G
#!/bin/bash # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. bs=200 ep=1 lr=2.2 wu=25 ema=0.999 momentum=0.93 mkdir -p /tmp/training-benchmark-1xAMP-A100-80G rm -rf /tmp/training-benchmark-1xAMP-A100-80G/* mpirun -np 1 --allow-run-as-root --bind-to none \ -map-by slot -x LD_LIBRARY_PATH -x PATH \ -mca pml ob1 -mca btl ^openib \ -x CUDA_VISIBLE_DEVICES=0 \ python3 train.py \ --training_file_pattern=/workspace/coco/train-* \ --val_file_pattern=/workspace/coco/val-* \ --val_json_file=/workspace/coco/annotations/instances_val2017.json \ --model_name=efficientdet-d0 \ --model_dir=/tmp/training-benchmark-1xAMP-A100-80G \ --backbone_init=/workspace/checkpoints/efficientnet-b0-joc \ --batch_size=$bs \ --num_epochs=$ep \ --use_xla=True \ --amp=True \ --lr=$lr \ --warmup_epochs=$wu \ --benchmark=True \ --benchmark_steps=500 \ --enable_map_parallelization=False \ --hparams="moving_average_decay=$ema,momentum=$momentum" \ 2>&1 | tee /tmp/training-benchmark-1xAMP-A100-80G/train-benchmark.log
Tools/PyTorch/TimeSeriesPredictionPlatform/conf
conf
inference_config
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. defaults: - inference: native checkpoint: ???
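For context, `???` marks a mandatory value in Hydra/OmegaConf configs, so `checkpoint` must be supplied at launch time (for example as a `checkpoint=...` command-line override). Below is a minimal OmegaConf sketch of that behaviour using a hand-built stand-in config; note that Hydra, not plain OmegaConf, is what resolves the `defaults` list, and the checkpoint path shown is a placeholder.

from omegaconf import OmegaConf

# Stand-in for the YAML above; only the mandatory field matters for this sketch.
cfg = OmegaConf.create({"checkpoint": "???"})

print(OmegaConf.is_missing(cfg, "checkpoint"))   # True: the value must be provided
cfg.checkpoint = "/path/to/checkpoint"           # e.g. supplied via a CLI override
print(OmegaConf.is_missing(cfg, "checkpoint"))   # False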
PyTorch/Translation/Transformer/fairseq/optim/lr_scheduler
lr_scheduler
__init__
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import importlib import os from .fairseq_lr_scheduler import FairseqLRScheduler LR_SCHEDULER_REGISTRY = {} def build_lr_scheduler(args, optimizer): return LR_SCHEDULER_REGISTRY[args.lr_scheduler](args, optimizer) def register_lr_scheduler(name): """Decorator to register a new LR scheduler.""" def register_lr_scheduler_cls(cls): if name in LR_SCHEDULER_REGISTRY: raise ValueError('Cannot register duplicate LR scheduler ({})'.format(name)) if not issubclass(cls, FairseqLRScheduler): raise ValueError('LR Scheduler ({}: {}) must extend FairseqLRScheduler'.format(name, cls.__name__)) LR_SCHEDULER_REGISTRY[name] = cls return cls return register_lr_scheduler_cls # automatically import any Python files in the optim/lr_scheduler/ directory for file in os.listdir(os.path.dirname(__file__)): if file.endswith('.py') and not file.startswith('_'): module = file[:file.find('.py')] importlib.import_module('fairseq.optim.lr_scheduler.' + module)
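To illustrate the registry pattern above: a new scheduler only needs to subclass `FairseqLRScheduler`, decorate itself with `register_lr_scheduler`, and live in this directory to be picked up by the auto-import loop. The sketch below is hypothetical; the hook and optimizer method names (`step_update`, `set_lr`, `get_lr`) and the list-valued `args.lr` are assumptions about the base classes, which are not shown here.

# Hypothetical example: fairseq/optim/lr_scheduler/constant_warmup.py
from . import register_lr_scheduler
from .fairseq_lr_scheduler import FairseqLRScheduler

@register_lr_scheduler('constant_warmup')
class ConstantWarmupSchedule(FairseqLRScheduler):
    """Linearly warm up to the peak LR, then hold it constant (illustrative only)."""

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        self.peak_lr = args.lr[0]  # args.lr is assumed to be a list, as in fairseq
        self.warmup_updates = getattr(args, 'warmup_updates', 4000)

    def step_update(self, num_updates):
        # Assumed per-update hook; scale the LR during warmup.
        scale = min(1.0, (num_updates + 1) / self.warmup_updates)
        self.optimizer.set_lr(scale * self.peak_lr)
        return self.optimizer.get_lr()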
PyTorch/SpeechRecognition/Jasper
Jasper
.gitignore
__pycache__ *.pt results/ datasets/ checkpoints/ *.swp *.swo *.swn
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/denoiser
denoiser
denoiserInstance
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "denoiserInstance.h" #include "cudaUtils.h" #include "dataShuffler.h" #include <stdexcept> using namespace nvinfer1; namespace tts { /****************************************************************************** * CONSTRUCTORS / DESTRUCTOR ************************************************** *****************************************************************************/ DenoiserInstance::DenoiserInstance(TRTPtr<ICudaEngine>&& engine) : TimedObject("DenoiserInstance::infer()"), mStreamingInstance(std::move(engine)), mInBufferDevice( mStreamingInstance.getChunkSize() * mStreamingInstance.getMaxBatchSize()), mOutBufferDevice( mStreamingInstance.getChunkSize() * mStreamingInstance.getMaxBatchSize()) { // do nothing } /****************************************************************************** * PUBLIC METHODS ************************************************************* *****************************************************************************/ void DenoiserInstance::infer(const int batchSize, const float* const inputDevice, const int inputSpacing, const int* const inputLength, float* outputDevice) { startTiming(); cudaStream_t stream; if (cudaStreamCreate(&stream) != cudaSuccess) { throw std::runtime_error("Failed to create stream."); } const int chunkSize = mStreamingInstance.getChunkSize(); int maxNumSamples = 0; for (int i = 0; i < batchSize; ++i) { if (inputLength[i] > maxNumSamples) { maxNumSamples = inputLength[i]; } } mStreamingInstance.startInference(); for (int pos = 0; pos < maxNumSamples; pos += chunkSize) { DataShuffler::frameTransfer( inputDevice, mInBufferDevice.data(), inputSpacing, pos, chunkSize, batchSize, chunkSize, 0, stream); mStreamingInstance.inferNext( batchSize, mInBufferDevice.data(), mOutBufferDevice.data(), stream); DataShuffler::frameTransfer( mOutBufferDevice.data(), outputDevice, chunkSize, 0, chunkSize, batchSize, inputSpacing, pos, stream); } CudaUtils::sync(stream); cudaStreamDestroy(stream); stopTiming(); } } // namespace tts
PyTorch/Translation/GNMT/scripts
scripts
verify_dataset
#!/bin/bash # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. set -e DATASET_DIR='data/wmt16_de_en' ACTUAL_SRC_TRAIN=`cat ${DATASET_DIR}/train.tok.clean.bpe.32000.en |md5sum` EXPECTED_SRC_TRAIN='b7482095b787264a310d4933d197a134 -' if [[ $ACTUAL_SRC_TRAIN = $EXPECTED_SRC_TRAIN ]]; then echo "OK: correct ${DATASET_DIR}/train.tok.clean.bpe.32000.en" else echo "ERROR: incorrect ${DATASET_DIR}/train.tok.clean.bpe.32000.en" echo "ERROR: expected $EXPECTED_SRC_TRAIN" echo "ERROR: found $ACTUAL_SRC_TRAIN" fi ACTUAL_TGT_TRAIN=`cat ${DATASET_DIR}/train.tok.clean.bpe.32000.de |md5sum` EXPECTED_TGT_TRAIN='409064aedaef5b7c458ff19a7beda462 -' if [[ $ACTUAL_TGT_TRAIN = $EXPECTED_TGT_TRAIN ]]; then echo "OK: correct ${DATASET_DIR}/train.tok.clean.bpe.32000.de" else echo "ERROR: incorrect ${DATASET_DIR}/train.tok.clean.bpe.32000.de" echo "ERROR: expected $EXPECTED_TGT_TRAIN" echo "ERROR: found $ACTUAL_TGT_TRAIN" fi ACTUAL_SRC_VAL=`cat ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.en |md5sum` EXPECTED_SRC_VAL='704c4ba8c8b63df1f6678d32b91438b5 -' if [[ $ACTUAL_SRC_VAL = $EXPECTED_SRC_VAL ]]; then echo "OK: correct ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.en" else echo "ERROR: incorrect ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.en" echo "ERROR: expected $EXPECTED_SRC_VAL" echo "ERROR: found $ACTUAL_SRC_VAL" fi ACTUAL_TGT_VAL=`cat ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.de |md5sum` EXPECTED_TGT_VAL='d27f5a64c839e20c5caa8b9d60075dde -' if [[ $ACTUAL_TGT_VAL = $EXPECTED_TGT_VAL ]]; then echo "OK: correct ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.de" else echo "ERROR: incorrect ${DATASET_DIR}/newstest_dev.tok.clean.bpe.32000.de" echo "ERROR: expected $EXPECTED_TGT_VAL" echo "ERROR: found $ACTUAL_TGT_VAL" fi ACTUAL_SRC_TEST=`cat ${DATASET_DIR}/newstest2014.tok.bpe.32000.en |md5sum` EXPECTED_SRC_TEST='cb014e2509f86cd81d5a87c240c07464 -' if [[ $ACTUAL_SRC_TEST = $EXPECTED_SRC_TEST ]]; then echo "OK: correct ${DATASET_DIR}/newstest2014.tok.bpe.32000.en" else echo "ERROR: incorrect ${DATASET_DIR}/newstest2014.tok.bpe.32000.en" echo "ERROR: expected $EXPECTED_SRC_TEST" echo "ERROR: found $ACTUAL_SRC_TEST" fi ACTUAL_TGT_TEST=`cat ${DATASET_DIR}/newstest2014.tok.bpe.32000.de |md5sum` EXPECTED_TGT_TEST='d616740f6026dc493e66efdf9ac1cb04 -' if [[ $ACTUAL_TGT_TEST = $EXPECTED_TGT_TEST ]]; then echo "OK: correct ${DATASET_DIR}/newstest2014.tok.bpe.32000.de" else echo "ERROR: incorrect ${DATASET_DIR}/newstest2014.tok.bpe.32000.de" echo "ERROR: 
expected $EXPECTED_TGT_TEST" echo "ERROR: found $ACTUAL_TGT_TEST" fi ACTUAL_TGT_TEST_TARGET=`cat ${DATASET_DIR}/newstest2014.de |md5sum` EXPECTED_TGT_TEST_TARGET='f6c3818b477e4a25cad68b61cc883c17 -' if [[ $ACTUAL_TGT_TEST_TARGET = $EXPECTED_TGT_TEST_TARGET ]]; then echo "OK: correct ${DATASET_DIR}/newstest2014.de" else echo "ERROR: incorrect ${DATASET_DIR}/newstest2014.de" echo "ERROR: expected $EXPECTED_TGT_TEST_TARGET" echo "ERROR: found $ACTUAL_TGT_TEST_TARGET" fi
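The same verification can be expressed compactly in Python; the sketch below is an illustrative alternative to the shell script above, reusing two of the file paths and md5 checksums listed there (trimmed to a subset for brevity).

import hashlib

# Expected md5sums copied from the shell script above (subset, for illustration).
EXPECTED = {
    "data/wmt16_de_en/newstest2014.tok.bpe.32000.de": "d616740f6026dc493e66efdf9ac1cb04",
    "data/wmt16_de_en/newstest2014.de": "f6c3818b477e4a25cad68b61cc883c17",
}

for path, expected in EXPECTED.items():
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            md5.update(chunk)
    actual = md5.hexdigest()
    status = "OK" if actual == expected else "ERROR"
    print(f"{status}: {path} (expected {expected}, found {actual})")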
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ModulationRemovalPlugin
taco2ModulationRemovalPlugin
taco2ModulationRemovalLayerPlugin
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_MODULATIONREMOVALLAYERPLUGIN_H #define TT2I_MODULATIONREMOVALLAYERPLUGIN_H #include "NvInfer.h" #include "cudaMemory.h" #include <string> #include <vector> namespace nvinfer1 { namespace plugin { class Taco2ModulationRemovalLayerPlugin : public nvinfer1::IPluginV2Ext { public: using value_type = float; /** * @brief Get the name of this plugin. * * @return The name. */ static const char* getName(); /** * @brief Get the version of this plugin. * * @return The version. */ static const char* getVersion(); /** * @brief Create a new Taco2ModulationRemovalLayerPlugin plugin from serialized data. * * @param data The data. * @param length The length of the data in bytes. * * @return The instantiated plugin. */ static Taco2ModulationRemovalLayerPlugin deserialize(const void* data, size_t length); /** * @brief Create a new ModulationRemoveLayer. * * @param weight The weights to use. * @param inputLength The input length. * @param filterLength The filter length. * @param hopLength The hop length. */ Taco2ModulationRemovalLayerPlugin( const nvinfer1::Weights& weight, int inputLength, int filterLength, int hopLength); /** * @brief Move constructor. * * @param other The Taco2ModulationRemovalLayerPlugin to move. */ Taco2ModulationRemovalLayerPlugin(Taco2ModulationRemovalLayerPlugin&& other); /** * @brief Move assignment operator. * * @param other The Taco2ModulationRemovalLayerPlugin to move. * * @return This Taco2ModulationRemovalLayerPlugin. */ Taco2ModulationRemovalLayerPlugin& operator=(Taco2ModulationRemovalLayerPlugin&& other); /** * @brief Destructor. */ ~Taco2ModulationRemovalLayerPlugin(); // disable copying Taco2ModulationRemovalLayerPlugin(const Taco2ModulationRemovalLayerPlugin& other) = delete; Taco2ModulationRemovalLayerPlugin& operator=(const Taco2ModulationRemovalLayerPlugin& other) = delete; /** * @brief Return the data type of the plugin output at the requested index. * * @param index The output index. * @param inputTypes The input data types. 
* @param nbInputs The number of inputs. * * @return The type of output. */ nvinfer1::DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const override; /** * @brief Check if the output will be broadcast across the batch. * * @param outputIndex The output index. * @param inputIsBroadCasted Whether or not the input is broadcasted. * @param nbInputs The number of inputs. * * @return True if the output will be broadcasted. */ bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadCasted, int nbInputs) const override; /** * @brief Check if the input can be broadcasted across the batch. * * @param inputIndex The input index. * * @return True if the input can be broadcasted. */ bool canBroadcastInputAcrossBatch(int inputIndex) const override; /** * @brief Get the plugin type. * * @return The plugin type. */ const char* getPluginType() const override; /** * @brief Get the plugin version. * * @return The plugin version. */ const char* getPluginVersion() const override; /** * @brief Get the number of outputs. * * @return The number of outputs. */ int getNbOutputs() const override; /** * @brief Get the dimensions of an output tensor. * * @param index The index of the output. * @param inputs The given inputs. * @param nbInputDims The number of inputs. * * @return The resulting dimensions. */ nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs, int nbInputDims) override; /** * @brief Check if the given plugin format is supported. * * @param type The data type. * @param format The plugin format. * * @return True if it is supported. */ bool supportsFormat(nvinfer1::DataType type, nvinfer1::PluginFormat format) const override; /** * @brief Configure this plugin with the given inputs, outputs, and datat * types. * * @param inputDims The input tensors dimensions. * @param nbInputs The number of inputs. * @param outputDims The output tensor dimensions. * @param nbOutputs The number of outputs. * @param inputTypes The input data types. * @param outputTypes The output data types. * @param inputIsBroadcast Whether or not the input is broadcast. * @param outputIsBroadcast Whether or not the output is broadcast. * @param format The format for the plugin. * @param maxBatchSize The maximum batch size that will be used. */ void configurePlugin(const nvinfer1::Dims* inputDims, int nbInputs, const nvinfer1::Dims* outputDims, int nbOutputs, const nvinfer1::DataType* inputTypes, const nvinfer1::DataType* outputTypes, const bool* inputIsBroadcast, const bool* outputIsBroadcast, nvinfer1::PluginFormat format, int maxBatchSize) override; /** * @brief Initialize the plugin. * * @return 0 if initialization was successful. Non-zero otherwise. */ int initialize() override; /** * @brief Terminate the plugin (deinitialize). */ void terminate() override; /** * @brief Get workspace size required by this plugin for up to the given * batch size. * * @param maxBatchSize The maximum number of items in the batch. * * @return The workspace size in bytes. */ size_t getWorkspaceSize(int maxBatchSize) const override; /** * @brief Set this plugin for execution on the stream. * * @param batchSize The number of items in the batch. * @param inputs The input tensors. * @param outputs The output tensors. * @param workspace The workspace. * @param stream The stream to operate on. * * @return 0 if successfully queued, non-zero otherwise. 
*/ int enqueue( int batchSize, const void* const* inputs, void** outputs, void* workspace, cudaStream_t stream) override; /** * @brief Get the number of bytes occupied by this plugin if serialized. * * @return The size in bytes. */ size_t getSerializationSize() const override; /** * @brief Serialize this plugin. * * @param buffer The buffer to write to. */ void serialize(void* buffer) const override; /** * @brief Destroy this plugin instance. */ void destroy() override; /** * @brief Clone this pulgin instance. * * @return The cloned plugin. */ IPluginV2Ext* clone() const override; /** * @brief Set the namespace of this plugin. * * @param pluginNamespace The namespace. */ void setPluginNamespace(const char* pluginNamespace) override; /** * @brief Get the namespace of this plugin. * * @return The namespace. */ const char* getPluginNamespace() const override; private: int mInputLength; int mFilterLength; int mHopLength; std::vector<float> mWeightsHost; tts::CudaMemory<float> mWeightsDevice; std::string mNamespace; }; } // namespace plugin } // namespace nvinfer1 #endif
TensorFlow2/LanguageModeling/ELECTRA
ELECTRA
utils
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json, pickle, sys, unicodedata, six, time, os import horovod.tensorflow as hvd import tensorflow as tf import dllogger def get_rank(): try: return hvd.rank() except: return 0 def get_world_size(): try: return hvd.size() except: return 1 def is_main_process(): return get_rank() == 0 def format_step(step): if isinstance(step, str): return step s = "" if len(step) == 1: s += "Training Iteration: {} ".format(step[0]) return s if len(step) > 0: s += "Training Epoch: {} ".format(step[0]) if len(step) > 1: s += "Training Iteration: {} ".format(step[1]) return s def load_json(path): with tf.io.gfile.GFile(path, "r") as f: return json.load(f) def write_json(o, path): if "/" in path: tf.io.gfile.makedirs(path.rsplit("/", 1)[0]) with tf.io.gfile.GFile(path, "w") as f: json.dump(o, f) def load_pickle(path): with tf.io.gfile.GFile(path, "rb") as f: return pickle.load(f) def write_pickle(o, path): if "/" in path: tf.io.gfile.makedirs(path.rsplit("/", 1)[0]) with tf.io.gfile.GFile(path, "wb") as f: pickle.dump(o, f, -1) def mkdir(path): if not tf.io.gfile.exists(path): tf.io.gfile.makedirs(path) def rmrf(path): if tf.io.gfile.exists(path): tf.io.gfile.rmtree(path) def rmkdir(path): rmrf(path) mkdir(path) def log(*args, **kwargs): all_rank = kwargs.pop("all_rank", False) if not all_rank and not is_main_process(): return msg = " ".join(map(str, args)) sys.stdout.write(msg + "\n") sys.stdout.flush() def log_config(config): for key, value in sorted(config.__dict__.items()): log(key, value) log() def heading(*args): log(80 * "=") log(*args) log(80 * "=") def nest_dict(d, prefixes, delim="_"): """Go from {prefix_key: value} to {prefix: {key: value}}.""" nested = {} for k, v in d.items(): for prefix in prefixes: if k.startswith(prefix + delim): if prefix not in nested: nested[prefix] = {} nested[prefix][k.split(delim, 1)[1]] = v else: nested[k] = v return nested def flatten_dict(d, delim="_"): """Go from {prefix: {key: value}} to {prefix_key: value}.""" flattened = {} for k, v in d.items(): if isinstance(v, dict): for k2, v2 in v.items(): flattened[k + delim + k2] = v2 else: flattened[k] = v return flattened def printable_text(text): """Returns text encoded in a way suitable for print or `tf.logging`.""" # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. 
if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode("utf-8") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def get_readable_time(elapsed): d, h, m, s = [int(x) for x in time.strftime("%d:%H:%M:%S", time.gmtime(elapsed)).split(':')] d -= 1 return '{:2d}h{:2d}m{:2d}s'.format(24*d + h, m, s) def setup_logger(args): os.makedirs(args.log_dir, exist_ok=True) if not args.json_summary: log_path = os.path.join(args.log_dir, 'dllogger_rank{}.log'.format(get_rank())) else: log_path = "{}_rank{}".format(args.json_summary, get_rank()) if is_main_process(): dllogger.init(backends = [dllogger.JSONStreamBackend(verbosity=1, filename=log_path), dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)]) else: dllogger.init(backends = [dllogger.JSONStreamBackend(verbosity=1, filename=log_path)]) for k,v in vars(args).items(): dllogger.log(step='PARAMETER', data={k:v}, verbosity=0) container_setup_info = { 'NVIDIA_TENSORFLOW_VERSION': os.environ.get('NVIDIA_TENSORFLOW_VERSION'), 'TENSORFLOW_VERSION': os.environ.get('TENSORFLOW_VERSION'), 'CUBLAS_VERSION': os.environ.get('CUBLAS_VERSION'), 'NCCL_VERSION': os.environ.get('NCCL_VERSION'), 'CUDA_DRIVER_VERSION': os.environ.get('CUDA_DRIVER_VERSION'), 'CUDNN_VERSION': os.environ.get('CUDNN_VERSION'), 'CUDA_VERSION': os.environ.get('CUDA_VERSION'), 'NVIDIA_PIPELINE_ID': os.environ.get('NVIDIA_PIPELINE_ID'), 'NVIDIA_BUILD_ID': os.environ.get('NVIDIA_BUILD_ID'), 'NVIDIA_TF32_OVERRIDE': os.environ.get('NVIDIA_TF32_OVERRIDE'), } dllogger.log(step='PARAMETER', data=container_setup_info, verbosity=0) def postprocess_dllog(args): if not args.json_summary: log_path = os.path.join(args.log_dir, 'dllogger_rank{}.log') else: log_path = str(args.json_summary) + "_rank{}" logfiles = [open(log_path.format(i), 'r') for i in range(get_world_size())] if not args.json_summary: log_path = os.path.join(args.log_dir, 'dllogger.log') else: log_path = str(args.json_summary) with open(log_path, 'w') as dest_file: for lines in zip(*[f.readlines() for f in logfiles]): json_lines = [json.loads(l[5:]) for l in lines] assert all(x['type'] == json_lines[0]['type'] for x in json_lines) if json_lines[0]['type'] != 'LOG': dest_file.write(lines[0]) continue assert all(x['step'] == json_lines[0]['step'] for x in json_lines) if json_lines[0]['step'] == 'PARAMETER': dest_file.write(lines[0]) else: d = dict.fromkeys(json_lines[0]['data']) for k in d.keys(): vs = [line['data'][k] for line in json_lines] d[k] = sum(vs)/len(vs) json_lines[0]['data'] = d dest_file.write('DLLL ') dest_file.write(json.dumps(json_lines[0])) dest_file.write('\n') for l in logfiles: l.close()
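A small usage sketch of the `nest_dict`/`flatten_dict` helpers defined above; the keys are toy values chosen only to show the round trip.

flat = {"model_lr": 5e-4, "model_depth": 12, "batch_size": 128}
nested = nest_dict(flat, prefixes=["model"])
# nested == {'model': {'lr': 0.0005, 'depth': 12}, 'batch_size': 128}
assert flatten_dict(nested) == flat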
TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/docker/containers
containers
triton_server_container
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import pathlib from threading import Thread from typing import Dict, Generator, Union from docker.models.containers import ExecResult from docker.types import DeviceRequest, Ulimit if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ....logger import LOGGER from ...exceptions import ContainerNotStarted from ..container import DockerContainer class TritonServerContainer(DockerContainer): def __init__( self, name: str, command: str, image: str, volumes: Dict, devices: Union[list, int], environment: Dict, log_file: Union[pathlib.Path, str], network: str = "host", shm_size: str = "1G", ): """ Initialize Triton Server Container Args: name: Container name command: Triton Server command to exec on container start image: Docker Image volumes: Volumes to mount inside container devices: Devices which has to be visible in container environment: Environment variables log_file: Path where logs should be saved network: Network mode shm_size: Shared memory size """ super().__init__(name) self._image = image self._command = command self._volumes = volumes self._devices = devices self._environment = environment self._network = network self._shm_size = shm_size self._triton_exec = None self._logging_thread = None self._log_file_path = pathlib.Path(log_file) def start(self) -> None: """ Start Triton Server Container """ devices = [ DeviceRequest(capabilities=[["gpu"]], device_ids=self._devices), ] LOGGER.info(f"Triton environment: {json.dumps(self._environment, indent=4)}") LOGGER.info(f"Starting Triton container {self.name}.") self._container = self._docker_client.containers.run( image=self._image, name=self.name, device_requests=devices, detach=True, tty=True, shm_size=self._shm_size, ulimits=[ Ulimit(name="memlock", soft=-1, hard=-1), Ulimit(name="stack", soft=67108864, hard=67108864), ], volumes=self._volumes, environment=self._environment, network_mode=self._network, auto_remove=True, ipc_mode="host", ) LOGGER.info("Triton command:") LOGGER.info(f" {self._command}") LOGGER.info(f"Starting Triton Server {self.name}.") self._triton_exec = self._docker_api_client.exec_create( container=self._container.id, cmd=self._command, ) stream_generator = self._docker_api_client.exec_start(exec_id=self._triton_exec["Id"], stream=True) self._logging_thread = Thread(target=TritonServerContainer._logging, args=(self, stream_generator), daemon=True) self._logging_thread.start() def stop(self) -> None: """ Stop Triton Server Container and save logs to file """ if self._container is not None: triton_result = self._docker_api_client.exec_inspect(self._triton_exec["Id"]) if triton_result.get("ExitCode") not in (0, None): LOGGER.info( f"Triton Inference Server instance {self.name} failed. 
Exit code: {triton_result.get('ExitCode')}" ) LOGGER.info(f"Stopping triton server {self.name}.") self._container.stop() self._container = None self._docker_client.close() self._docker_api_client.close() def run(self, command: str) -> ExecResult: """ Run command in container Args: command: Command to execute Returns: ExecResult """ if not self._container: raise ContainerNotStarted("Triton Server Container is not running. Use .start() first.") return self._container.exec_run(command) def _logging(self, generator: Generator) -> None: """Triton logging thread for Triton Inference Server Args: generator (string generator): Triton log stream. """ with open(self._log_file_path, mode="w") as file: try: while True: log = next(generator) txt = log.decode("utf-8") file.write(txt) except StopIteration: LOGGER.info(f"Saving Triton Inference Server {self.name} logs in {self._log_file_path}.")
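For orientation, a minimal sketch of how this class might be instantiated is shown below; the image tag, volume mapping, Triton command line and log path are placeholders rather than values taken from the runner configs.

# Hypothetical usage of TritonServerContainer (image, paths and flags are placeholders).
server = TritonServerContainer(
    name="tritonserver",
    command="tritonserver --model-repository=/models --strict-model-config=false",
    image="nvcr.io/nvidia/tritonserver:22.02-py3",
    volumes={"/home/user/model_repository": {"bind": "/models", "mode": "ro"}},
    devices=["0"],
    environment={"NVIDIA_VISIBLE_DEVICES": "0"},
    log_file="triton_server.log",
)
server.start()
# ... run client or benchmark code against the default HTTP/gRPC ports ...
server.stop()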
PyTorch/LanguageModeling/BART/bart/modeling
modeling
modeling_bart
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BART model, ported from the fairseq repo.""" import logging import math import random import warnings from typing import Dict, List, Optional, Tuple import numpy as np import torch import torch.nn.functional as F from torch import Tensor, nn from torch.nn import CrossEntropyLoss from utils.activations import ACT2FN from bart.configuration.configuration_bart import BartConfig from utils.file_utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from bart.modeling.modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, ) from bart.modeling.modeling_utils import PreTrainedModel logger = logging.getLogger(__name__) _CONFIG_FOR_DOC = "BartConfig" _TOKENIZER_FOR_DOC = "BartTokenizer" BART_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/bart-large", # See all BART models at https://huggingface.co/models?filter=bart ] def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), float("-inf")) mask_cond = torch.arange(mask.size(-1)) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True): ''' if torch.cuda.is_available(): try: from apex.normalization import FusedLayerNorm return FusedLayerNorm(normalized_shape, eps, elementwise_affine) except ImportError: pass ''' return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) class BartLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): assert padding_idx is not None, "`padding_idx` should not be None, but of type int" # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models dont have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim, padding_idx=padding_idx) def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids_shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ) return super().forward(positions + self.offset) class BartAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})." 
self.scaling = self.head_dim ** -0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) assert attn_weights.size() == ( bsz * self.num_heads, tgt_len, src_len, ), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" if attention_mask is not None: assert attention_mask.size() == ( bsz, 1, tgt_len, src_len, ), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask.to(attn_weights.dtype) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = F.softmax(attn_weights, dim=-1, dtype=attn_weights.dtype) if layer_head_mask is not None: assert layer_head_mask.size() == ( self.num_heads, ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) assert attn_output.size() == ( bsz * self.num_heads, tgt_len, self.head_dim, ), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}" attn_output = ( attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) .transpose(1, 2) .reshape(bsz, tgt_len, embed_dim) ) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class BartEncoderLayer(nn.Module): def __init__(self, config: BartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BartAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.pre_ln = config.pre_ln def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ): """ Args: hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (:obj:`torch.FloatTensor`): attention mask of size `(batch, 1, 
tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size `(config.encoder_attention_heads,)`. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. """ residual = hidden_states if self.pre_ln: dtype = hidden_states.dtype hidden_states = self.self_attn_layer_norm(hidden_states).to(dtype) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states).to(dtype) hidden_states = self.activation_fn(self.fc1(hidden_states)).to(dtype) hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states else: hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) dtype = hidden_states.dtype hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states).to(dtype) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)).to(dtype) hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states).to(dtype) if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class BartDecoderLayer(nn.Module): def __init__(self, config: BartConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BartAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = LayerNorm(self.embed_dim) self.encoder_attn = BartAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.encoder_attn_layer_norm = LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim) self.pre_ln = config.pre_ln def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, encoder_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: 
Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ): """ Args: hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (:obj:`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size `(config.encoder_attention_heads,)`. encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of size `(config.encoder_attention_heads,)`. past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. """ residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple if self.pre_ln: dtype = hidden_states.dtype hidden_states = self.self_attn_layer_norm(hidden_states).to(dtype) hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states = self.encoder_attn_layer_norm(hidden_states).to(dtype) hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=encoder_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states).to(dtype) hidden_states = self.activation_fn(self.fc1(hidden_states)).to(dtype) hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states else: hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, 
attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) dtype = hidden_states.dtype hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states).to(dtype) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=encoder_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states).to(dtype) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)).to(dtype) hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states).to(dtype) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class BartClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class BartPretrainedModel(PreTrainedModel): config_class = BartConfig base_model_prefix = "model" def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs class PretrainedBartModel(BartPretrainedModel): def __init_subclass__(self): warnings.warn( "The class `PretrainedBartModel` has been depreciated, please use `BartPretrainedModel` instead.", FutureWarning, ) BART_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. 
Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BartConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BART_GENERATION_EXAMPLE = r""" Summarization example:: >>> from transformers import BartTokenizer, BartForConditionalGeneration, BartConfig >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large') >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') >>> ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='pt') >>> # Generate Summary >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True) >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids]) Mask filling example:: >>> from transformers import BartTokenizer, BartForConditionalGeneration >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large') >>> TXT = "My friends are <mask> but they eat too many carbs." >>> model = BartForConditionalGeneration.from_pretrained('facebook/bart-large') >>> input_ids = tokenizer([TXT], return_tensors='pt')['input_ids'] >>> logits = model(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> tokenizer.decode(predictions).split() """ BART_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ Bart uses the :obj:`eos_token_id` as the starting token for :obj:`decoder_input_ids` generation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see :obj:`past_key_values`). 
For translation and summarization training, :obj:`decoder_input_ids` should be provided. If no :obj:`decoder_input_ids` is provided, the model will create this tensor by shifting the :obj:`input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read :func:`modeling_bart._prepare_decoder_inputs` and modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more information on the default strategy. head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`): Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`: :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds` have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds` takes the value of :obj:`inputs_embeds`. 
use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ class BartEncoder(BartPretrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a :class:`BartEncoderLayer`. Args: config: BartConfig embed_tokens (torch.nn.Embedding): output embedding """ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.cast_dtype = config.dtype self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) self.embed_positions = BartLearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, self.padding_idx, ) self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layernorm_embedding = LayerNorm(embed_dim) self.pre_ln = config.pre_ln if self.pre_ln: self.last_layernorm = LayerNorm(embed_dim) self.init_weights() def forward( self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. 
See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) if self.cast_dtype: attention_mask = attention_mask.to(self.cast_dtype) if self.cast_dtype: hidden_states = hidden_states.to(self.cast_dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." 
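        # LayerDrop (https://arxiv.org/abs/1909.11556): during training, each encoder layer
        # in the loop below is skipped independently with probability `self.layerdrop`
        # (one uniform draw per layer per forward pass); at inference every layer runs.
        # For example, with encoder_layerdrop=0.1 and 12 layers, roughly one layer is
        # dropped per training step on average.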
for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: if getattr(self.config, "gradient_checkpointing", False) and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if self.pre_ln: hidden_states = self.last_layernorm(hidden_states) if self.cast_dtype: hidden_states = hidden_states.to(self.cast_dtype) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class BartDecoder(BartPretrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`BartDecoderLayer` Args: config: BartConfig embed_tokens (torch.nn.Embedding): output embedding """ def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.cast_dtype = config.dtype self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 if embed_tokens is not None: self.embed_tokens = embed_tokens else: self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = BartLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, self.padding_idx, ) self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = LayerNorm(config.d_model) self.pre_ln = config.pre_ln if self.pre_ln: self.last_layernorm = LayerNorm(config.d_model) self.init_weights() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length ).to(self.device) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, 
encoder_attention_mask=None, head_mask=None, encoder_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. 
return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions positions = self.embed_positions(input_shape, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) if self.cast_dtype: hidden_states = hidden_states.to(self.cast_dtype) if attention_mask is not None: attention_mask = attention_mask.to(self.cast_dtype) assert encoder_hidden_states.dtype==self.cast_dtype encoder_attention_mask = encoder_attention_mask.to(self.cast_dtype) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warn( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, use_cache) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, encoder_head_mask[idx] if encoder_head_mask is not None else None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if self.pre_ln: hidden_states = self.last_layernorm(hidden_states) if self.cast_dtype: hidden_states = hidden_states.to(self.cast_dtype) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare BART Model outputting raw hidden-states without any specific head on top.", BART_START_DOCSTRING, ) class BartModel(BartPretrainedModel): def __init__(self, config: BartConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = BartEncoder(config, self.shared) self.decoder = BartDecoder(config, self.shared) self.init_weights() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/bart-large", output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # different to other models, Bart automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( input_ids, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, encoder_head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The BART Model with a language modeling head. 
Can be used for summarization.", BART_START_DOCSTRING ) class BartForConditionalGeneration(BartPretrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"final_logits_bias", r"encoder\.version", r"decoder\.version", r"lm_head\.weight", ] def __init__(self, config: BartConfig): super().__init__(config) self.model = BartModel(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) self.init_weights() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) self._resize_final_logits_bias(new_num_tokens) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BART_GENERATION_EXAMPLE) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``. 
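        Example (illustrative sketch; reuses the tokenizer and model from the summarization
        example above and assumes the default ``return_dict`` behavior)::

            >>> inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors='pt')
            >>> labels = tokenizer(["My friends are cool."], return_tensors='pt').input_ids
            >>> outputs = model(input_ids=inputs['input_ids'], labels=labels)
            >>> loss, logits = outputs.loss, outputs.logits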
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if decoder_input_ids is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past=None, attention_mask=None, head_mask=None, use_cache=None, encoder_outputs=None, **kwargs ): # cut decoder_input_ids if past is used if past is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) @staticmethod def _reorder_cache(past, beam_idx): reordered_past = () for layer_past in past: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( """ Bart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
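    Example (illustrative sketch; assumes the same import style as the examples above and that
    ``num_labels`` is forwarded to the config by ``from_pretrained``)::

        >>> from transformers import BartTokenizer, BartForSequenceClassification
        >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
        >>> model = BartForSequenceClassification.from_pretrained('facebook/bart-large', num_labels=2)
        >>> inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors='pt')
        >>> logits = model(**inputs).logits

    The classification head pools the decoder hidden state at the final ``<eos>`` token, so every
    example in a batch must contain the same number of ``<eos>`` tokens.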
""", BART_START_DOCSTRING, ) class BartForSequenceClassification(BartPretrainedModel): def __init__(self, config: BartConfig, **kwargs): super().__init__(config, **kwargs) self.model = BartModel(config) self.classification_head = BartClassificationHead( config.d_model, config.d_model, config.num_labels, config.classifier_dropout, ) self.model._init_weights(self.classification_head.dense) self.model._init_weights(self.classification_head.out_proj) @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/bart-large", output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id) if len(torch.unique(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ :, -1, : ] logits = self.classification_head(sentence_representation) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ BART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", BART_START_DOCSTRING, ) class BartForQuestionAnswering(BartPretrainedModel): def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = BartModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.model._init_weights(self.qa_outputs) @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="facebook/bart-large", output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, start_positions=None, end_positions=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, 
cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) class BartDecoderWrapper(BartPretrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the :class:`~transformers.EncoderDecoderModel` framework. """ def __init__(self, config): super().__init__(config) self.decoder = BartDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) class BartForCausalLM(BartPretrainedModel): def __init__(self, config): super().__init__(config) config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False self.model = BartDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.init_weights() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, encoder_head_mask=None, past_key_values=None, inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.BartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. 
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
                Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
                on hidden heads. Mask values selected in ``[0, 1]``:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
                decoding.

                If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
                (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
                instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
            labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
                Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
                config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
                ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
            use_cache (:obj:`bool`, `optional`):
                If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
                decoding (see :obj:`past_key_values`).
            output_attentions (:obj:`bool`, `optional`):
                Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
                returned tensors for more detail.
            output_hidden_states (:obj:`bool`, `optional`):
                Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
                for more detail.
            return_dict (:obj:`bool`, `optional`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.

        Returns:

        Example::

            >>> from transformers import BartTokenizer, BartForCausalLM

            >>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
            >>> model = BartForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False)
            >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> logits = outputs.logits
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            head_mask=head_mask,
            encoder_head_mask=encoder_head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        logits = self.lm_head(outputs[0])

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_ids.shape)

        if past:
            input_ids = input_ids[:, -1:]
        # first step, decoder_cached_states are empty
        return {
            "input_ids": input_ids,  # encoder_outputs is defined. input_ids not needed
            "attention_mask": attention_mask,
            "past_key_values": past,
            "use_cache": use_cache,
        }

    @staticmethod
    def _reorder_cache(past, beam_idx):
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
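The three heads above (sequence classification, question answering, causal LM) share the same call pattern. Below is a minimal, hedged usage sketch for the sequence-classification head; how the model and tokenizer are obtained, and the HF-style tokenizer call, are assumptions for illustration rather than code taken from this repository.

import torch

def classify(model, tokenizer, text):
    """Sketch: score one sentence with BartForSequenceClassification."""
    enc = tokenizer(text, return_tensors="pt")  # assumed HF-style tokenizer API
    model.eval()
    with torch.no_grad():
        out = model(input_ids=enc["input_ids"],
                    attention_mask=enc["attention_mask"],
                    return_dict=True)
    # out.logits: (batch_size, config.num_labels); pick the highest-scoring class.
    return out.logits.argmax(dim=-1).item()

The causal-LM head is read the same way through `outputs.logits`, and the question-answering head through `start_logits`/`end_logits`.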
CUDA-Optimized/FastSpeech/fastspeech/model
model
fastspeech
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from collections import OrderedDict import numpy as np import torch from torch import nn as nn from fastspeech.model.module import FFTBlocks, LengthRegulator from fastspeech.utils.pytorch import to_device_async from fastspeech.utils.nvtx import Nvtx from torch.nn import functional as F from fastspeech.utils.logging import tprint from fastspeech.text_norm.symbols import symbols class Fastspeech(nn.Module): """ FastSpeech """ def __init__(self, max_seq_len, d_model, phoneme_side_n_layer, phoneme_side_head, phoneme_side_conv1d_filter_size, phoneme_side_output_size, mel_side_n_layer, mel_side_head, mel_side_conv1d_filter_size, mel_side_output_size, fft_conv1d_kernel, fft_conv1d_padding, duration_predictor_filter_size, duration_predictor_kernel_size, dropout, n_mels, fused_layernorm=False): super(Fastspeech, self).__init__() self.max_seq_len = max_seq_len self.d_model = d_model self.phoneme_side_n_layer = phoneme_side_n_layer self.phoneme_side_head = phoneme_side_head self.phoneme_side_conv1d_filter_size = phoneme_side_conv1d_filter_size self.phoneme_side_output_size = phoneme_side_output_size self.mel_side_n_layer = mel_side_n_layer self.mel_side_head = mel_side_head self.mel_side_conv1d_filter_size = mel_side_conv1d_filter_size self.mel_side_output_size = mel_side_output_size self.fft_conv1d_kernel = fft_conv1d_kernel self.fft_conv1d_padding = fft_conv1d_padding self.duration_predictor_filter_size = duration_predictor_filter_size self.duration_predictor_kernel_size = duration_predictor_kernel_size self.dropout = dropout self.n_mels = n_mels self.fused_layernorm = fused_layernorm self.n_phns = len(symbols)+1 self.word_emb = nn.Embedding( self.n_phns, d_model, padding_idx=0) self.phoneme_side = FFTBlocks( max_seq_len=max_seq_len, n_layers=phoneme_side_n_layer, n_head=phoneme_side_head, d_k=64, d_v=64, d_model=d_model, d_inner=phoneme_side_conv1d_filter_size, fft_conv1d_kernel=fft_conv1d_kernel, fft_conv1d_padding=fft_conv1d_padding, dropout=dropout, name="phoneme_side", fused_layernorm=fused_layernorm ) 
self.length_regulator = LengthRegulator( input_size=phoneme_side_output_size, duration_predictor_filter_size=duration_predictor_filter_size, duration_predictor_kernel_size=duration_predictor_kernel_size, dropout=dropout, fused_layernorm=fused_layernorm ) self.mel_side = FFTBlocks( max_seq_len=max_seq_len, n_layers=mel_side_n_layer, n_head=mel_side_head, d_k=64, d_v=64, d_model=d_model, d_inner=mel_side_conv1d_filter_size, fft_conv1d_kernel=fft_conv1d_kernel, fft_conv1d_padding=fft_conv1d_padding, dropout=dropout, name="mel_side", fused_layernorm=fused_layernorm ) self.mel_linear = nn.Linear(mel_side_output_size, n_mels, bias=True) def forward(self, seq, pos, duration_target=None, alpha=1.0, seq_output_len=None, use_fp16=False, acts=None): # Phoneme Embedding output = self.word_emb(seq) if acts is not None: acts["act.emb"] = output if use_fp16: output = output.half() # Phoneme Side FFT Blocks output, output_mask = self.phoneme_side(output, pos, acts=acts) if acts is not None: acts["act.phoneme_side.seq"] = output # Length Regulator output, pos, duration = self.length_regulator( output, output_mask, target=duration_target, alpha=alpha) if seq_output_len: output = F.pad(output, pad=(0, 0, 0, seq_output_len - output.size(1))) pos = F.pad(pos, pad=(0, seq_output_len - pos.size(1))) # length of output mel shouldn't exceed max_seq_len output = output[:, :self.max_seq_len] pos = pos[:, :self.max_seq_len] if acts is not None: acts["act.length_regulator.seq"] = output acts["act.length_regulator.dur"] = torch.round(duration) if self.training or output.bool().any(): # Mel Side FFT Blocks output, output_mask = self.mel_side(output, pos, acts=acts) if acts is not None: acts["act.mel_side.seq"] = output # Linear Layer output = self.mel_linear(output) if acts is not None: acts["out.seq_mask"] = output_mask acts["out.seq"] = output else: # seq length could be zero, in case duration predictor outputs all zeros. # In this case, skip feed-forwarding. tprint("Duration Predictor outputs all zeros. Output will be zero length.") output_shape = (output.size(0), 0, output_mask.size(2)) output = torch.zeros(size=(output_shape)) output_mask = torch.ones(size=(output_shape)) if torch.cuda.device_count() > 1: # In a multi-gpu setting, all output mels from devices must have the same length. # otherwise, an error occurs in process of gathering output. if not seq_output_len: seq_output_len = self.max_seq_len padding = (0, 0, 0, seq_output_len - output.size(1)) output = F.pad(output, padding) output = output[:, :seq_output_len, :] output_mask = F.pad(output_mask, padding) output_mask = output_mask[:, :seq_output_len, :] return output, output_mask, duration
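A minimal inference sketch for the forward pass above. `model` is assumed to be an already constructed `Fastspeech` instance (the hyperparameter values come from the repository's hparam files and are not repeated here); shapes follow the signature in the source.

import torch

model.eval()
seq = torch.randint(1, model.n_phns, (1, 20))         # padded phoneme ids, 0 = padding
pos = torch.arange(1, seq.size(1) + 1).unsqueeze(0)   # 1-based positions, 0 at padded steps
with torch.no_grad():
    mel, mel_mask, durations = model(seq, pos, alpha=1.0)
# mel:       (batch, predicted mel length, n_mels)
# mel_mask:  mask over the valid mel frames
# durations: per-phoneme durations from the length regulator; alpha rescales them
#            at inference time to speed up or slow down the synthesized speech.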
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
json_utils_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for google3.image.understanding.object_detection.utils.json_utils.""" import os import tensorflow as tf from object_detection.utils import json_utils class JsonUtilsTest(tf.test.TestCase): def testDumpReasonablePrecision(self): output_path = os.path.join(tf.test.get_temp_dir(), 'test.json') with tf.gfile.GFile(output_path, 'w') as f: json_utils.Dump(1.0, f, float_digits=2) with tf.gfile.GFile(output_path, 'r') as f: self.assertEqual(f.read(), '1.00') def testDumpPassExtraParams(self): output_path = os.path.join(tf.test.get_temp_dir(), 'test.json') with tf.gfile.GFile(output_path, 'w') as f: json_utils.Dump([1.0], f, float_digits=2, indent=3) with tf.gfile.GFile(output_path, 'r') as f: self.assertEqual(f.read(), '[\n 1.00\n]') def testDumpZeroPrecision(self): output_path = os.path.join(tf.test.get_temp_dir(), 'test.json') with tf.gfile.GFile(output_path, 'w') as f: json_utils.Dump(1.0, f, float_digits=0, indent=3) with tf.gfile.GFile(output_path, 'r') as f: self.assertEqual(f.read(), '1') def testDumpUnspecifiedPrecision(self): output_path = os.path.join(tf.test.get_temp_dir(), 'test.json') with tf.gfile.GFile(output_path, 'w') as f: json_utils.Dump(1.012345, f) with tf.gfile.GFile(output_path, 'r') as f: self.assertEqual(f.read(), '1.012345') def testDumpsReasonablePrecision(self): s = json_utils.Dumps(1.0, float_digits=2) self.assertEqual(s, '1.00') def testDumpsPassExtraParams(self): s = json_utils.Dumps([1.0], float_digits=2, indent=3) self.assertEqual(s, '[\n 1.00\n]') def testDumpsZeroPrecision(self): s = json_utils.Dumps(1.0, float_digits=0) self.assertEqual(s, '1') def testDumpsUnspecifiedPrecision(self): s = json_utils.Dumps(1.012345) self.assertEqual(s, '1.012345') def testPrettyParams(self): s = json_utils.Dumps({'v': 1.012345, 'n': 2}, **json_utils.PrettyParams()) self.assertEqual(s, '{\n "n": 2,\n "v": 1.0123\n}') def testPrettyParamsExtraParamsInside(self): s = json_utils.Dumps( {'v': 1.012345, 'n': float('nan')}, **json_utils.PrettyParams(allow_nan=True)) self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}') with self.assertRaises(ValueError): s = json_utils.Dumps( {'v': 1.012345, 'n': float('nan')}, **json_utils.PrettyParams(allow_nan=False)) def testPrettyParamsExtraParamsOutside(self): s = json_utils.Dumps( {'v': 1.012345, 'n': float('nan')}, allow_nan=True, **json_utils.PrettyParams()) self.assertEqual(s, '{\n "n": NaN,\n "v": 1.0123\n}') with self.assertRaises(ValueError): s = json_utils.Dumps( {'v': 1.012345, 'n': float('nan')}, allow_nan=False, **json_utils.PrettyParams()) if __name__ == '__main__': tf.test.main()
PyTorch/SpeechSynthesis/FastPitch/waveglow
waveglow
denoiser
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************

import torch
from common.layers import STFT


class Denoiser(torch.nn.Module):
    """ Removes model bias from audio produced with waveglow """

    def __init__(self, waveglow, filter_length=1024, n_overlap=4,
                 win_length=1024, mode='zeros'):
        super(Denoiser, self).__init__()
        device = waveglow.upsample.weight.device
        dtype = waveglow.upsample.weight.dtype
        self.stft = STFT(filter_length=filter_length,
                         hop_length=int(filter_length/n_overlap),
                         win_length=win_length).to(device)
        if mode == 'zeros':
            mel_input = torch.zeros((1, 80, 88), dtype=dtype, device=device)
        elif mode == 'normal':
            mel_input = torch.randn((1, 80, 88), dtype=dtype, device=device)
        else:
            raise Exception("Mode {} is not supported".format(mode))

        with torch.no_grad():
            bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
            bias_spec, _ = self.stft.transform(bias_audio)

        self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])

    def forward(self, audio, strength=0.1):
        audio_spec, audio_angles = self.stft.transform(audio)
        audio_spec_denoised = audio_spec - self.bias_spec * strength
        audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
        audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
        return audio_denoised
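A short usage sketch for the Denoiser above, assuming `waveglow` is an already loaded WaveGlow model and `mel` is a mel-spectrogram batch on the same device (both are assumptions of this sketch, not objects defined here).

import torch

denoiser = Denoiser(waveglow, filter_length=1024, n_overlap=4,
                    win_length=1024, mode='zeros')
with torch.no_grad():
    audio = waveglow.infer(mel, sigma=0.9).float()
    # The inverse STFT typically returns (batch, 1, samples); drop the channel dim.
    audio_denoised = denoiser(audio, strength=0.01).squeeze(1)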
PyTorch/SpeechSynthesis/HiFiGAN/fastpitch
fastpitch
transformer
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn import torch.nn.functional as F from common.utils import mask_from_lens class PositionalEmbedding(nn.Module): def __init__(self, demb): super(PositionalEmbedding, self).__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.matmul(torch.unsqueeze(pos_seq, -1), torch.unsqueeze(self.inv_freq, 0)) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1) if bsz is not None: return pos_emb[None, :, :].expand(bsz, -1, -1) else: return pos_emb[None, :, :] class PositionwiseConvFF(nn.Module): def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False): super(PositionwiseConvFF, self).__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential( nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)), nn.ReLU(), # nn.Dropout(dropout), # worse convergence nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)), nn.Dropout(dropout), ) self.layer_norm = nn.LayerNorm(d_model) self.pre_lnorm = pre_lnorm def forward(self, inp): return self._forward(inp) def _forward(self, inp): if self.pre_lnorm: # layer normalization + positionwise feed-forward core_out = inp.transpose(1, 2) core_out = self.CoreNet(self.layer_norm(core_out).to(inp.dtype)) core_out = core_out.transpose(1, 2) # residual connection output = core_out + inp else: # positionwise feed-forward core_out = inp.transpose(1, 2) core_out = self.CoreNet(core_out) core_out = core_out.transpose(1, 2) # residual connection + layer normalization output = self.layer_norm(inp + core_out).to(inp.dtype) return output class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1, pre_lnorm=False): super(MultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) def forward(self, inp, attn_mask=None): return self._forward(inp, attn_mask) def _forward(self, inp, attn_mask=None): residual = inp if self.pre_lnorm: # layer normalization inp = self.layer_norm(inp) n_head, d_head = self.n_head, self.d_head head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2) head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head) head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head) head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head) q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) attn_score = 
torch.bmm(q, k.transpose(1, 2)) attn_score.mul_(self.scale) if attn_mask is not None: attn_mask = attn_mask.unsqueeze(1).to(attn_score.dtype) attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1) attn_score.masked_fill_(attn_mask.to(torch.bool), -float('inf')) attn_prob = F.softmax(attn_score, dim=2) attn_prob = self.dropatt(attn_prob) attn_vec = torch.bmm(attn_prob, v) attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head) attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view( inp.size(0), inp.size(1), n_head * d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = residual + attn_out else: # residual connection + layer normalization output = self.layer_norm(residual + attn_out) output = output.to(attn_out.dtype) return output class TransformerLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout, **kwargs): super(TransformerLayer, self).__init__() self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, mask=None): output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2)) output *= mask output = self.pos_ff(output) output *= mask return output class FFTransformer(nn.Module): def __init__(self, n_layer, n_head, d_model, d_head, d_inner, kernel_size, dropout, dropatt, dropemb=0.0, embed_input=True, n_embed=None, d_embed=None, padding_idx=0, pre_lnorm=False): super(FFTransformer, self).__init__() self.d_model = d_model self.n_head = n_head self.d_head = d_head self.padding_idx = padding_idx if embed_input: self.word_emb = nn.Embedding(n_embed, d_embed or d_model, padding_idx=self.padding_idx) else: self.word_emb = None self.pos_emb = PositionalEmbedding(self.d_model) self.drop = nn.Dropout(dropemb) self.layers = nn.ModuleList() for _ in range(n_layer): self.layers.append( TransformerLayer( n_head, d_model, d_head, d_inner, kernel_size, dropout, dropatt=dropatt, pre_lnorm=pre_lnorm) ) def forward(self, dec_inp, seq_lens=None, conditioning=0): if self.word_emb is None: inp = dec_inp mask = mask_from_lens(seq_lens).unsqueeze(2) else: inp = self.word_emb(dec_inp) # [bsz x L x 1] mask = (dec_inp != self.padding_idx).unsqueeze(2) pos_seq = torch.arange(inp.size(1), device=inp.device).to(inp.dtype) pos_emb = self.pos_emb(pos_seq) * mask out = self.drop(inp + pos_emb + conditioning) for layer in self.layers: out = layer(out, mask=mask) # out = self.drop(out) return out, mask
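A construction sketch for the embedding-input path of `FFTransformer`; the layer sizes and the vocabulary size below are illustrative assumptions, not the values used by the repository's configs.

import torch

encoder = FFTransformer(
    n_layer=6, n_head=1, d_model=384, d_head=64, d_inner=1536,
    kernel_size=3, dropout=0.1, dropatt=0.1, dropemb=0.0,
    embed_input=True, n_embed=148, d_embed=384, padding_idx=0)

symbols = torch.randint(1, 148, (2, 17))   # padded symbol ids, 0 is the padding index
out, mask = encoder(symbols)
# out:  (2, 17, 384) encoded sequence
# mask: (2, 17, 1) boolean mask derived from the padding index

With `embed_input=False` (the decoder-side usage), `dec_inp` is already a float sequence and `seq_lens` must be passed so the mask can be built from the lengths instead of the padding index.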
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/trtis_client/src
src
CMakeLists
## # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # include_directories(".") file(GLOB trtis_client_sources *.cpp) include_directories("${CMAKE_SOURCE_DIR}/trtis_include/") include_directories("${CMAKE_SOURCE_DIR}/trtis_include/build/trtis-clients") include_directories("${CMAKE_SOURCE_DIR}/trtis_include/build/protobuf/include") link_directories("${CMAKE_SOURCE_DIR}/trtis_lib") add_library(tt2i_client ${trtis_client_sources}) target_compile_options(tt2i_client PRIVATE ${CPP_DEVEL_FLAGS} -fPIC) set_property(TARGET tt2i_client PROPERTY ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) add_subdirectory(bin)
PyTorch/LanguageModeling/BERT/lamb_amp_opt/csrc
csrc
multi_tensor_lamb
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> // Another possibility: // #include <torch/all.h> #include <iostream> #include <assert.h> #include <type_traits> #include "type_shim.h" #include "multi_tensor_apply.cuh" #define BLOCK_SIZE 512 #define ILP 4 std::tuple<at::Tensor, at::Tensor> multi_tensor_l2norm_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, at::optional<bool> per_tensor_python); template<typename T> __device__ __forceinline__ bool is_aligned(T* p){ return ((uint64_t)p) % (ILP*sizeof(T)) == 0; } template<typename T> __device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } typedef enum{ MOMENT_MODE_0 =0, // L2 regularization mode MOMENT_MODE_1 =1 // Decoupled weight decay mode } adamMode_t; using MATH_T = float; #include<cstdio> template<typename T, typename param_t> struct LAMBStage1Functor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<4>& tl, const float beta1, const float beta2, const float beta3, const int* step_ptr, const int bias_correction, const float epsilon, adamMode_t mode, const float decay, const float* global_grad_norm, const float* max_global_grad_norm, const float* found_inf, const float* inv_scale) { if (*noop_gmem) { return; } float beta1_correction = 1.0f; float beta2_correction = 1.0f; if (bias_correction == 1) { int step = *step_ptr; beta1_correction = 1 - std::pow(beta1, step); beta2_correction = 1 - std::pow(beta2, step); } int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; float clipped_global_grad_norm = (*global_grad_norm) > (*max_global_grad_norm) ? 
(*global_grad_norm) / (*max_global_grad_norm) : 1.0f; T* g = (T*)tl.addresses[0][tensor_loc]; g += chunk_idx*chunk_size; param_t* p = (param_t*)tl.addresses[1][tensor_loc]; p += chunk_idx*chunk_size; param_t* m = (param_t*)tl.addresses[2][tensor_loc]; m += chunk_idx*chunk_size; param_t* v = (param_t*)tl.addresses[3][tensor_loc]; v += chunk_idx*chunk_size; n -= chunk_idx*chunk_size; MATH_T r_g[ILP]; MATH_T r_p[ILP]; MATH_T r_m[ILP]; MATH_T r_v[ILP]; // to make things simple, we put aligned case in a different code path if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(g) && is_aligned(p) && is_aligned(m) && is_aligned(v)) { T l_g[ILP]; param_t l_p[ILP]; param_t l_m[ILP]; param_t l_v[ILP]; for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x) { // load load_store(l_g, g, 0, i_start); if (decay != 0) load_store(l_p, p, 0, i_start); load_store(l_m, m, 0, i_start); load_store(l_v, v, 0, i_start); // unpack #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_g[ii] = l_g[ii] * (*inv_scale); if (decay == 0) { r_p[ii] = MATH_T(0); } else { r_p[ii] = l_p[ii]; } r_m[ii] = l_m[ii]; r_v[ii] = l_v[ii]; } #pragma unroll for(int ii = 0; ii < ILP; ii++) { if (mode == MOMENT_MODE_0) { MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; // L2 on scaled grad scaled_grad = scaled_grad + decay*r_p[ii]; r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad; MATH_T next_m_unbiased = r_m[ii] / beta1_correction; MATH_T next_v_unbiased = r_v[ii] / beta2_correction; MATH_T denom = sqrtf(next_v_unbiased) + epsilon; r_p[ii] = next_m_unbiased / denom; } else { MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad; MATH_T next_m_unbiased = r_m[ii] / beta1_correction; MATH_T next_v_unbiased = r_v[ii] / beta2_correction; MATH_T denom = sqrtf(next_v_unbiased) + epsilon; r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]); } // printf("(%d %d) r_g: %f, r_p: %f, r_m: %f, r_v: %f\n", i_start, ii, r_g[ii], r_p[ii], r_m[ii], r_v[ii]); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { l_p[ii] = r_p[ii]; // Difference from APEX's LAMB kernel. `g` and `p` can be different dtypes. l_g[ii] = r_p[ii]; l_m[ii] = r_m[ii]; l_v[ii] = r_v[ii]; } // store load_store(g, l_g, i_start, 0); load_store(m, l_m, i_start, 0); load_store(v, l_v, i_start, 0); } } else { // see note in multi_tensor_scale_kernel.cu for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { MATH_T r_g[ILP]; MATH_T r_p[ILP]; MATH_T r_m[ILP]; MATH_T r_v[ILP]; #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { r_g[ii] = g[i] * (*inv_scale); // special ?optimization? 
for lamb stage 1 if (decay == 0) { r_p[ii] = MATH_T(0); } else { r_p[ii] = p[i]; } r_m[ii] = m[i]; r_v[ii] = v[i]; } else { r_g[ii] = MATH_T(0); r_p[ii] = MATH_T(0); r_m[ii] = MATH_T(0); r_v[ii] = MATH_T(0); } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { if (mode == MOMENT_MODE_0) { MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; // L2 on scaled grad scaled_grad = scaled_grad + decay*r_p[ii]; r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad; MATH_T next_m_unbiased = r_m[ii] / beta1_correction; MATH_T next_v_unbiased = r_v[ii] / beta2_correction; MATH_T denom = sqrtf(next_v_unbiased) + epsilon; r_p[ii] = next_m_unbiased / denom; } else { MATH_T scaled_grad = r_g[ii] / clipped_global_grad_norm; r_m[ii] = r_m[ii] * beta1 + beta3 * scaled_grad; r_v[ii] = r_v[ii] * beta2 + (1-beta2) * scaled_grad * scaled_grad; MATH_T next_m_unbiased = r_m[ii] / beta1_correction; MATH_T next_v_unbiased = r_v[ii] / beta2_correction; MATH_T denom = sqrtf(next_v_unbiased) + epsilon; r_p[ii] = (next_m_unbiased/denom) + (decay*r_p[ii]); } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { g[i] = r_p[ii]; m[i] = r_m[ii]; v[i] = r_v[ii]; } } } } } }; // Step 2 reads in 'update' value and per-tensor param_norm and update_norm. // It computes new parameter value. // N == 2: FP32 params, no master params // N == 3: FP16 params, FP32 master params. template<typename T, int N, typename param_t> struct LAMBStage2Functor { static_assert((N == 2 && std::is_same<T, param_t>::value) || (N == 3 && std::is_same<param_t, float>::value), ""); __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<N>& tl, const float* per_tensor_param_norm, const float* per_tensor_update_norm, const float* learning_rate, const float decay, bool use_nvlamb) { if (*noop_gmem) { return; } int tensor_loc = tl.block_to_tensor[blockIdx.x]; int tensor_num = tl.start_tensor_this_launch + tensor_loc; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; MATH_T ratio = *learning_rate; // nvlamb: apply adaptive learning rate to all parameters // otherwise, only apply to those with non-zero weight decay if (use_nvlamb || (decay != 0.0)) { float param_norm = per_tensor_param_norm[tensor_num]; float update_norm = per_tensor_update_norm[tensor_num]; ratio = (update_norm != 0.0f && param_norm != 0.0f) ? 
*learning_rate * (param_norm / update_norm) : *learning_rate; } T* update = (T*)tl.addresses[0][tensor_loc]; update += chunk_idx*chunk_size; param_t* p = (param_t*)tl.addresses[1][tensor_loc]; p += chunk_idx*chunk_size; T* out_p; if (N == 3) { out_p = (T*)tl.addresses[2][tensor_loc]; out_p += chunk_idx*chunk_size; } n -= chunk_idx*chunk_size; // to make things simple, we put aligned case in a different code path bool can_use_aligned_path = n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(p) && is_aligned(update); if (N == 3) { can_use_aligned_path = can_use_aligned_path && is_aligned(out_p); } if(can_use_aligned_path) { param_t r_p[ILP]; T r_update[ILP]; T r_out_p[ILP]; for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x) { // load load_store(r_p, p, 0, i_start); load_store(r_update, update, 0, i_start); if (N == 3) { load_store(r_out_p, out_p, 0, i_start); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_p[ii] = static_cast<MATH_T>(r_p[ii]) - (ratio * static_cast<MATH_T>(r_update[ii])); if (N == 3) { r_out_p[ii] = r_p[ii]; } } load_store(p, r_p, i_start, 0); if (N == 3) { load_store(out_p, r_out_p, i_start, 0); } } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { MATH_T r_p[ILP]; MATH_T r_update[ILP]; #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { r_p[ii] = p[i]; r_update[ii] = update[i]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { r_p[ii] = r_p[ii] - (ratio * r_update[ii]); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { p[i] = r_p[ii]; if (N == 3) { out_p[i] = p[i]; } } } } } } }; void multi_tensor_lamb_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, at::Tensor lr, const float beta1, const float beta2, const float epsilon, const at::Tensor step, const int bias_correction, const float weight_decay, const int grad_averaging, const int mode, at::Tensor global_grad_norm, at::Tensor max_grad_norm, at::optional<bool> use_nvlamb_python, at::Tensor found_inf, at::Tensor inv_scale) { // n_tensors == 5: FP16 model params & FP32 master params // n_tensors == 4: FP32 model params & NO FP32 master params const auto n_tensors = tensor_lists.size(); assert(n_tensors == 4 || n_tensors == 5); using namespace at; bool use_nvlamb = use_nvlamb_python.has_value() ? 
use_nvlamb_python.value() : false; // note(mkozuki): move bias handling below to functor // Handle bias correction mode // float bias_correction1 = 1.0f, bias_correction2 = 1.0f; // if (bias_correction == 1) { // bias_correction1 = 1 - std::pow(beta1, step); // bias_correction2 = 1 - std::pow(beta2, step); // } // Handle grad averaging mode float beta3 = 1.0f; if (grad_averaging == 1) beta3 = 1 - beta1; std::vector<std::vector<at::Tensor>> stage1_tensor_lists(tensor_lists.begin(), tensor_lists.begin() + 4); std::vector<std::vector<at::Tensor>> grad_list(tensor_lists.begin(), tensor_lists.begin()+1); std::vector<std::vector<at::Tensor>> param_list(tensor_lists.begin()+1, tensor_lists.begin()+2); // Compute per tensor param norm auto param_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, param_list, true); // We now in-place modify grad to store update before compute its norm // Generally this is not a issue since people modify grad in step() method all the time // We can also grab list of empty tensor to avoid this, but I'd like to save space/cpu code if (n_tensors == 4) { DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_1", multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, stage1_tensor_lists, LAMBStage1Functor<scalar_t_0, scalar_t_0>(), beta1, beta2, beta3, // 1-beta1 or 1 depends on averaging mode // bias_correction1, // bias_correction2, step.data_ptr<int>(), bias_correction, epsilon, (adamMode_t) mode, weight_decay, global_grad_norm.data_ptr<float>(), max_grad_norm.data_ptr<float>(), found_inf.data_ptr<float>(), inv_scale.data_ptr<float>()); ) } else { DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_1", multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, stage1_tensor_lists, LAMBStage1Functor<scalar_t_0, float>(), beta1, beta2, beta3, // 1-beta1 or 1 depends on averaging mode // bias_correction1, // bias_correction2, step.data_ptr<int>(), bias_correction, epsilon, (adamMode_t) mode, weight_decay, global_grad_norm.data_ptr<float>(), max_grad_norm.data_ptr<float>(), found_inf.data_ptr<float>(), inv_scale.data_ptr<float>()); ) } // Compute update norms auto update_norm_tuple = multi_tensor_l2norm_cuda(chunk_size, noop_flag, grad_list, true); std::vector<std::vector<at::Tensor>> grad_param_list(tensor_lists.begin(), tensor_lists.begin()+2); if (n_tensors == 4) { DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_2", multi_tensor_apply<2>( BLOCK_SIZE, chunk_size, noop_flag, grad_param_list, LAMBStage2Functor<scalar_t_0, 2, scalar_t_0>(), std::get<1>(param_norm_tuple).data_ptr<float>(), std::get<1>(update_norm_tuple).data_ptr<float>(), lr.data_ptr<float>(), weight_decay, use_nvlamb); ) } else { grad_param_list.push_back(tensor_lists[4]); DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "lamb_stage_2", multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, grad_param_list, LAMBStage2Functor<scalar_t_0, 3, float>(), std::get<1>(param_norm_tuple).data_ptr<float>(), std::get<1>(update_norm_tuple).data_ptr<float>(), lr.data_ptr<float>(), weight_decay, use_nvlamb); ) } AT_CUDA_CHECK(cudaGetLastError()); }
PyTorch/Classification/ConvNets/efficientnet/inference/TF32
TF32
DGXA100_efficientnet-widese-b4_TF32
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 1 --workspace ${1:-./} --raport-file raport_1.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 2 --workspace ${1:-./} --raport-file raport_2.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 4 --workspace ${1:-./} --raport-file raport_4.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 8 --workspace ${1:-./} --raport-file raport_8.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 16 --workspace ${1:-./} --raport-file raport_16.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 32 --workspace ${1:-./} --raport-file raport_32.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 64 --workspace ${1:-./} --raport-file raport_64.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 128 --workspace ${1:-./} --raport-file raport_128.json python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 256 --workspace ${1:-./} --raport-file raport_256.json
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit
deployment_toolkit
utils
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import Tuple LOGGER = logging.getLogger(__name__) def parse_server_url(server_url: str) -> Tuple[str, str, int]: DEFAULT_PORTS = {"http": 8000, "grpc": 8001} # extract protocol server_url_items = server_url.split("://") if len(server_url_items) != 2: raise ValueError("Prefix server_url with protocol ex.: grpc://127.0.0.1:8001") requested_protocol, server_url = server_url_items requested_protocol = requested_protocol.lower() if requested_protocol not in DEFAULT_PORTS: raise ValueError(f"Unsupported protocol: {requested_protocol}") # extract host and port default_port = DEFAULT_PORTS[requested_protocol] server_url_items = server_url.split(":") if len(server_url_items) == 1: host, port = server_url, default_port elif len(server_url_items) == 2: host, port = server_url_items port = int(port) if port != default_port: LOGGER.warning( f"Current server URL is {server_url} while default {requested_protocol} port is {default_port}" ) else: raise ValueError(f"Could not parse {server_url}. Example of correct server URL: grpc://127.0.0.1:8001") return requested_protocol, host, port
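A quick usage sketch for `parse_server_url` above:

protocol, host, port = parse_server_url("grpc://127.0.0.1:8001")
assert (protocol, host, port) == ("grpc", "127.0.0.1", 8001)

# Omitting the port falls back to the protocol default (8000 for http, 8001 for grpc).
assert parse_server_url("http://localhost") == ("http", "localhost", 8000)

# A missing protocol prefix or an unsupported protocol raises ValueError.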
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks
networks
bert_classifier
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Trainer network for BERT-style models.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.nlp.modeling import networks @tf.keras.utils.register_keras_serializable(package='Text') class BertClassifier(tf.keras.Model): """Classifier model based on a BERT-style transformer-based encoder. This is an implementation of the network structure surrounding a transformer encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). The BertClassifier allows a user to pass in a transformer stack, and instantiates a classification network based on the passed `num_classes` argument. Attributes: network: A transformer network. This network should output a sequence output and a classification output. Furthermore, it should expose its embedding table via a "get_embedding_table" method. num_classes: Number of classes to predict from the classification network. initializer: The initializer (if any) to use in the classification networks. Defaults to a Glorot uniform initializer. output: The output style for this network. Can be either 'logits' or 'predictions'. """ def __init__(self, network, num_classes, initializer='glorot_uniform', output='logits', dropout_rate=0.1, **kwargs): self._self_setattr_tracking = False self._config = { 'network': network, 'num_classes': num_classes, 'initializer': initializer, 'output': output, } # We want to use the inputs of the passed network as the inputs to this # Model. To do this, we need to keep a handle to the network inputs for use # when we construct the Model object at the end of init. inputs = network.inputs # Because we have a copy of inputs to create this Model object, we can # invoke the Network object with its own input tensors to start the Model. _, cls_output = network(inputs) cls_output = tf.keras.layers.Dropout(rate=dropout_rate)(cls_output) self.classifier = networks.Classification( input_width=cls_output.shape[-1], num_classes=num_classes, initializer=initializer, output=output, name='classification') predictions = self.classifier(cls_output) super(BertClassifier, self).__init__( inputs=inputs, outputs=predictions, **kwargs) def get_config(self): return self._config @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
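A hedged construction sketch for the class above. `encoder` is assumed to be a transformer network from `official.nlp.modeling.networks` (for example a `TransformerEncoder`) that returns `(sequence_output, cls_output)` and exposes its Keras `inputs`; the input tensors are likewise assumed to exist.

# Wrap an existing encoder network with a 3-class classification head.
classifier = BertClassifier(network=encoder, num_classes=3, dropout_rate=0.1)

# The classifier reuses the encoder's inputs, typically
# [input_word_ids, input_mask, input_type_ids], and returns logits by default.
logits = classifier([input_word_ids, input_mask, input_type_ids])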
PyTorch/LanguageModeling/BART/bart/tokenization
tokenization
tokenization_xlm_roberta
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for XLM-RoBERTa model.""" import os from shutil import copyfile from typing import List, Optional from bart.tokenization.tokenization_utils import PreTrainedTokenizer from bart.tokenization.tokenization_xlnet import SPIECE_UNDERLINE from utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "xlm-roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-base-sentencepiece.bpe.model", "xlm-roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-dutch": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll02-dutch-sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll02-spanish": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll02-spanish-sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll03-english": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll03-english-sentencepiece.bpe.model", "xlm-roberta-large-finetuned-conll03-german": "https://s3.amazonaws.com/models.huggingface.co/bert/xlm-roberta-large-finetuned-conll03-german-sentencepiece.bpe.model", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "xlm-roberta-base": 512, "xlm-roberta-large": 512, "xlm-roberta-large-finetuned-conll02-dutch": 512, "xlm-roberta-large-finetuned-conll02-spanish": 512, "xlm-roberta-large-finetuned-conll03-english": 512, "xlm-roberta-large-finetuned-conll03-german": 512, } class XLMRobertaTokenizer(PreTrainedTokenizer): """ Adapted from RobertaTokenizer and XLNetTokenizer SentencePiece based tokenizer. Peculiarities: - requires `SentencePiece <https://github.com/google/sentencepiece>`_ This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (:obj:`str`): Path to the vocabulary file. bos_token (:obj:`string`, `optional`, defaults to "<s>"): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. .. note:: When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the :obj:`cls_token`. eos_token (:obj:`string`, `optional`, defaults to "</s>"): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. 
        sep_token (:obj:`string`, `optional`, defaults to "</s>"):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences
            for sequence classification or for a text and a question for question answering.
            It is also used as the last token of a sequence built with special tokens.
        cls_token (:obj:`string`, `optional`, defaults to "<s>"):
            The classifier token which is used when doing sequence classification (classification of the whole
            sequence instead of per-token classification). It is the first token of the sequence when built with
            special tokens.
        unk_token (:obj:`string`, `optional`, defaults to "<unk>"):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        pad_token (:obj:`string`, `optional`, defaults to "<pad>"):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (:obj:`string`, `optional`, defaults to "<mask>"):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<s>NOTUSED", "</s>NOTUSED"]`):
            Additional special tokens used by the tokenizer.

    Attributes:
        sp_model (:obj:`SentencePieceProcessor`):
            The `SentencePiece` processor that is used for every conversion (string, tokens and IDs).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: "
                "https://github.com/google/sentencepiece "
                "pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: "
                "https://github.com/google/sentencepiece "
                "pip install sentencepiece"
            )
            raise
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks
        by concatenating and adding special tokens. An XLM-R sequence has the following format:

        - single sequence: ``<s> X </s>``
        - pair of sequences: ``<s> A </s></s> B </s>``

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
        """

        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer ``prepare_for_model`` methods.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of ids.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Set to True if the token list is already formatted with special tokens for the model.

        Returns:
            :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
        XLM-R does not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (:obj:`List[int]`):
                List of ids.
            token_ids_1 (:obj:`List[int]`, `optional`):
                Optional second list of IDs for sequence pairs.

        Returns:
            :obj:`List[int]`: List of zeros.
        """

        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.EncodeAsPieces(text)

    def _convert_token_to_id(self, token):
        """ Converts a token (str) to an id using the vocab. """
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory):
        """
        Save the sentencepiece vocabulary (copy original file) and special tokens file to a directory.

        Args:
            save_directory (:obj:`str`):
                The directory in which to save the vocabulary.

        Returns:
            :obj:`Tuple(str)`: Paths to the files saved.
        """
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
PyTorch/Recommendation/DLRM/dlrm/cuda_ext
cuda_ext
__init__
from .dot_based_interact import dotBasedInteract
from .fused_gather_embedding import buckle_embedding_fused_gather
from .sparse_embedding import JointSparseEmbedding
TensorFlow2/LanguageModeling/BERT/official/utils/misc
misc
model_helpers
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Miscellaneous functions that can be called by models."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numbers

import tensorflow as tf
from tensorflow.python.util import nest


def past_stop_threshold(stop_threshold, eval_metric):
  """Return a boolean representing whether a model should be stopped.

  Args:
    stop_threshold: float, the threshold above which a model should stop
      training.
    eval_metric: float, the current value of the relevant metric to check.

  Returns:
    True if training should stop, False otherwise.

  Raises:
    ValueError: if either stop_threshold or eval_metric is not a number
  """
  if stop_threshold is None:
    return False

  if not isinstance(stop_threshold, numbers.Number):
    raise ValueError("Threshold for checking stop conditions must be a number.")
  if not isinstance(eval_metric, numbers.Number):
    raise ValueError("Eval metric being checked against stop conditions "
                     "must be a number.")

  if eval_metric >= stop_threshold:
    tf.compat.v1.logging.info(
        "Stop threshold of {} was passed with metric value {}.".format(
            stop_threshold, eval_metric))
    return True

  return False


def generate_synthetic_data(
    input_shape, input_value=0, input_dtype=None, label_shape=None,
    label_value=0, label_dtype=None):
  """Create a repeating dataset with constant values.

  Args:
    input_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape
      of the input data.
    input_value: Value of each input element.
    input_dtype: Input dtype. If None, will be inferred by the input value.
    label_shape: a tf.TensorShape object or nested tf.TensorShapes. The shape
      of the label data.
    label_value: Value of each label element.
    label_dtype: Label dtype. If None, will be inferred by the label value.

  Returns:
    Dataset of tensors or tuples of tensors (if label_shape is set).
  """
  # TODO(kathywu): Replace with SyntheticDataset once it is in contrib.
  element = input_element = nest.map_structure(
      lambda s: tf.constant(input_value, input_dtype, s), input_shape)

  if label_shape:
    label_element = nest.map_structure(
        lambda s: tf.constant(label_value, label_dtype, s), label_shape)
    element = (input_element, label_element)

  return tf.data.Dataset.from_tensors(element).repeat()


def apply_clean(flags_obj):
  if flags_obj.clean and tf.io.gfile.exists(flags_obj.model_dir):
    tf.compat.v1.logging.info("--clean flag set. Removing existing model dir:"
                              " {}".format(flags_obj.model_dir))
    tf.io.gfile.rmtree(flags_obj.model_dir)
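A short usage sketch for the two helpers above, assuming the `official` package from this repository is on the Python path and TensorFlow 2 is installed. The shapes, dtypes, and the 0.95 stop threshold are illustrative values chosen for the example, not settings used by the repository.

# Hypothetical usage of model_helpers; shapes and threshold are illustrative only.
import tensorflow as tf

from official.utils.misc import model_helpers

# Constant-valued dataset shaped like (features, labels) batches.
synthetic = model_helpers.generate_synthetic_data(
    input_shape=tf.TensorShape([32, 128]), input_value=1, input_dtype=tf.float32,
    label_shape=tf.TensorShape([32]), label_value=0, label_dtype=tf.int32)

for features, labels in synthetic.take(1):
  print(features.shape, labels.shape)  # (32, 128) (32,)

# Early-stopping check after an (imaginary) evaluation step.
if model_helpers.past_stop_threshold(stop_threshold=0.95, eval_metric=0.97):
  print("Target metric reached; stopping training.")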
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/waveglow
waveglow
waveGlowBuilder
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef TT2I_WAVEGLOWBUILDER_H
#define TT2I_WAVEGLOWBUILDER_H

#include "trtPtr.h"

#include <memory>
#include <string>

namespace nvinfer1
{
class ICudaEngine;
class IBuilder;
class INetworkDefinition;
class ILogger;
} // namespace nvinfer1

namespace tts
{

class WaveGlowBuilder
{
public:
    /**
     * @brief Create a new WaveGlowBuilder.
     *
     * @param modelPath The path of the ONNX file to load.
     * @param logger The logger to use while parsing.
     */
    WaveGlowBuilder(const std::string& modelPath, std::shared_ptr<nvinfer1::ILogger> logger);

    /**
     * @brief Create a new WaveGlow engine.
     *
     * @param builder The builder.
     * @param maxBatchSize The maximum batch size the engine should be able to
     * handle.
     * @param useFP16 Whether or not to allow FP16 in the engine.
     *
     * @return The built engine.
     */
    TRTPtr<nvinfer1::ICudaEngine> build(
        nvinfer1::IBuilder& builder,
        const int maxBatchSize,
        const bool useFP16);

private:
    std::string mOnnxModelPath;
    std::shared_ptr<nvinfer1::ILogger> mLogger;
};

} // namespace tts

#endif
PyTorch/SpeechSynthesis/FastPitch/scripts
scripts
prepare_dataset
#!/usr/bin/env bash

set -e

: ${DATA_DIR:=LJSpeech-1.1}
: ${ARGS="--extract-mels"}

python prepare_dataset.py \
    --wav-text-filelists filelists/ljs_audio_text.txt \
    --n-workers 16 \
    --batch-size 1 \
    --dataset-path $DATA_DIR \
    --extract-pitch \
    --f0-method pyin \
    $ARGS
TensorFlow2/LanguageModeling/BERT/data
data
SquadDownloader
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bz2
import os
import urllib.request
import sys


class SquadDownloader:
    def __init__(self, save_path):
        self.save_path = save_path + '/squad'

        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)

        if not os.path.exists(self.save_path + '/v1.1'):
            os.makedirs(self.save_path + '/v1.1')

        if not os.path.exists(self.save_path + '/v2.0'):
            os.makedirs(self.save_path + '/v2.0')

        self.download_urls = {
            'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json': 'v1.1/train-v1.1.json',
            'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json': 'v1.1/dev-v1.1.json',
            'https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/': 'v1.1/evaluate-v1.1.py',
            'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json': 'v2.0/train-v2.0.json',
            'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json': 'v2.0/dev-v2.0.json',
            'https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/': 'v2.0/evaluate-v2.0.py',
        }

    def download(self):
        for item in self.download_urls:
            url = item
            file = self.download_urls[item]

            print('Downloading:', url)
            if os.path.isfile(self.save_path + '/' + file):
                print('** Download file already exists, skipping download')
            else:
                response = urllib.request.urlopen(url)
                with open(self.save_path + '/' + file, "wb") as handle:
                    handle.write(response.read())
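A minimal usage sketch for the downloader above, assuming the file is importable as a module named SquadDownloader (it sits in the data/ directory of this repository); the './download' target directory is an illustrative choice, not one mandated by the repository.

# Hypothetical invocation; the save path is an example value.
from SquadDownloader import SquadDownloader

# Files land under ./download/squad/v1.1 and ./download/squad/v2.0,
# and existing files are skipped on re-runs.
downloader = SquadDownloader(save_path='./download')
downloader.download()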
TensorFlow/LanguageModeling/Transformer-XL/tf/scripts/docker
docker
build
#!/bin/bash

# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

docker build . --network=host --rm -t transformer-xl:latest