relative_path
stringclasses
812 values
section
stringclasses
339 values
filename
stringlengths
2
61
text
stringlengths
6
1.76M
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
np_mask_ops_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.np_mask_ops.""" import numpy as np import tensorflow as tf from object_detection.utils import np_mask_ops class MaskOpsTests(tf.test.TestCase): def setUp(self): masks1_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0]], dtype=np.uint8) masks1_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) masks1 = np.stack([masks1_0, masks1_1]) masks2_0 = np.array([[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0]], dtype=np.uint8) masks2_1 = np.array([[1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]], dtype=np.uint8) masks2_2 = np.array([[1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0]], dtype=np.uint8) masks2 = np.stack([masks2_0, masks2_1, masks2_2]) self.masks1 = masks1 self.masks2 = masks2 def testArea(self): areas = np_mask_ops.area(self.masks1) expected_areas = np.array([8.0, 10.0], dtype=np.float32) self.assertAllClose(expected_areas, areas) def testIntersection(self): intersection = np_mask_ops.intersection(self.masks1, self.masks2) expected_intersection = np.array( [[8.0, 0.0, 8.0], [0.0, 9.0, 7.0]], dtype=np.float32) self.assertAllClose(intersection, expected_intersection) def testIOU(self): iou = np_mask_ops.iou(self.masks1, self.masks2) expected_iou = np.array( [[1.0, 0.0, 8.0/25.0], [0.0, 9.0 / 16.0, 7.0 / 28.0]], dtype=np.float32) self.assertAllClose(iou, expected_iou) def testIOA(self): ioa21 = np_mask_ops.ioa(self.masks1, self.masks2) expected_ioa21 = np.array([[1.0, 0.0, 8.0/25.0], [0.0, 9.0/15.0, 7.0/25.0]], dtype=np.float32) self.assertAllClose(ioa21, expected_ioa21) if __name__ == '__main__': tf.test.main()
TensorFlow/Segmentation/UNet_Industrial/scripts
scripts
UNet_FP32_8GPU_XLA
#!/usr/bin/env bash # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script launches UNet training in FP32 on 8 GPUs using 16 batch size (2 per GPU) # Usage ./UNet_FP32_8GPU_XLA.sh <path to result repository> <path to dataset> <dagm classID (1-10)> BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export TF_CPP_MIN_LOG_LEVEL=3 mpirun \ -np 8 \ -H localhost:8 \ -bind-to none \ -map-by slot \ -x NCCL_DEBUG=VERSION \ -x LD_LIBRARY_PATH \ -x PATH \ -mca pml ob1 -mca btl ^openib \ --allow-run-as-root \ python "${BASEDIR}/../main.py" \ --unet_variant='tinyUNet' \ --activation_fn='relu' \ --exec_mode='train_and_evaluate' \ --iter_unit='batch' \ --num_iter=2500 \ --batch_size=2 \ --warmup_step=10 \ --results_dir="${1}" \ --data_dir="${2}" \ --dataset_name='DAGM2007' \ --dataset_classID="${3}" \ --data_format='NCHW' \ --use_auto_loss_scaling \ --nouse_tf_amp \ --use_xla \ --learning_rate=1e-4 \ --learning_rate_decay_factor=0.8 \ --learning_rate_decay_steps=500 \ --rmsprop_decay=0.9 \ --rmsprop_momentum=0.8 \ --loss_fn_name='adaptive_loss' \ --weight_decay=1e-5 \ --weight_init_method='he_uniform' \ --augment_data \ --display_every=250 \ --debug_verbosity=0
TensorFlow/LanguageModeling/BERT
BERT
README
# BERT For TensorFlow This repository provides a script and recipe to train the BERT model for TensorFlow to achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA. BERT model for TensorFlow1 is no longer maintained and will soon become unavailable, please consider PyTorch or TensorFlow2 models as a substitute for your requirements. ## Table Of Contents - [Model overview](#model-overview) * [Model architecture](#model-architecture) * [Default configuration](#default-configuration) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [Enabling TF32](#enabling-tf32) * [Glossary](#glossary) - [Setup](#setup) * [Requirements](#requirements) - [Quick Start Guide](#quick-start-guide) - [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Parameters](#parameters) * [Command-line options](#command-line-options) * [Getting the data](#getting-the-data) * [Dataset guidelines](#dataset-guidelines) * [Multi-dataset](#multi-dataset) * [Training process](#training-process) * [Pre-training](#pre-training) * [Fine tuning](#fine-tuning) * [Multi-node](#multi-node) * [Inference process](#inference-process) * [Inference Process With TensorRT](#inference-process-with-tensorrt) * [Deploying the BERT model using Triton Inference Server](#deploying-the-bert-model-using-triton-inference-server) * [BioBERT](#biobert) - [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Pre-training accuracy](#pre-training-accuracy) * [Fine-tuning accuracy for SQuAD v1.1: NVIDIA DGX A100 (8x A100 40GB)](#fine-tuning-accuracy-for-squad-v1.1-nvidia-dgx-a100-8x-a100-40GB) * [Fine-tuning accuracy for GLUE MRPC: NVIDIA DGX A100 (8x A100 40GB)](#fine-tuning-accuracy-for-glue-mrpc-nvidia-dgx-a100-8x-a100-40GB) * [Training stability test](#training-stability-test) * [Pre-training SQuAD v1.1 stability test: NVIDIA DGX A100 (256x A100 40GB)](#pre-training-squad-v1.1-stability-test-nvidia-dgx-a100-256x-a100-40GB) * [Fine-tuning SQuAD v1.1 stability test: NVIDIA DGX A100 (8x A100 40GB)](#fine-tuning-squad-v1.1-stability-test-nvidia-dgx-a100-8x-a100-40GB) * [Fine-tuning GLUE MRPC stability test: NVIDIA DGX A100 (8x A100 40GB)](#fine-tuning-glue-mrpc-stability-test-nvidia-dgx-a100-8x-a100-40GB) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16GB) * [Pre-training training performance: single-node on DGX-1 16GB](#pre-training-training-performance-single-node-on-dgx-1-16GB) * [Fine-tuning training performance for SQuAD v1.1 on DGX-1 16GB](#fine-tuning-training-performance-for-squad-v1.1-on-dgx-1-16GB) * [Training performance: NVIDIA DGX-1 (8x V100 32GB)](#training-performance-nvidia-dgx-1-8x-v100-32GB) * [Pre-training training performance: single-node on DGX-1 32GB](#pre-training-training-performance-single-node-on-dgx-1-32GB) * [Fine-tuning training performance for SQuAD v1.1 on DGX-1 32GB](#fine-tuning-training-performance-for-squad-v1.1-on-dgx-1-32GB) * [Training performance: NVIDIA DGX-2 (16x V100 32GB)](#training-performance-nvidia-dgx-2-16x-v100-32GB) * [Pre-training training performance: single-node on DGX-2 32GB](#pre-training-training-performance-single-node-on-dgx-2-32GB) * [Pre-training training performance: multi-node on DGX-2 32GB](#pre-training-training-performance-multi-node-on-dgx-2-32GB) * [Fine-tuning training performance for SQuAD v1.1 on DGX-2 32GB](#fine-tuning-training-performance-for-squad-v1.1-on-dgx-2-32GB) * [Training performance: NVIDIA DGX A100 (8x A100 40GB)](#training-performance-nvidia-dgx-a100-8x-a100-40gb) * [Pre-training training performance: single-node on DGX A100 40GB](#pre-training-training-performance-single-node-on-dgx-a100-40gb) * [Pre-training training performance: multi-node on DGX A100 40GB](#pre-training-training-performance-multi-node-on-dgx-a100-40gb) * [Fine-tuning training performance for SQuAD v1.1 on DGX A100 40GB](#fine-tuning-training-performance-for-squad-v1.1-on-dgx-a100-40gb) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16GB) * [Fine-tuning inference performance for SQuAD v1.1 on 16GB](#fine-tuning-inference-performance-for-squad-v1.1-on-16GB) * [Inference performance: NVIDIA DGX-1 (1x V100 32GB)](#inference-performance-nvidia-dgx-1-1x-v100-32GB) * [Fine-tuning inference performance for SQuAD v1.1 on 32GB](#fine-tuning-inference-performance-for-squad-v1.1-on-32GB) * [Inference performance: NVIDIA DGX-2 (1x V100 32GB)](#inference-performance-nvidia-dgx-2-1x-v100-32GB) * [Fine-tuning inference performance for SQuAD v1.1 on DGX-2 32GB](#fine-tuning-inference-performance-for-squad-v1.1-on-dgx-2-32GB) * [Inference performance: NVIDIA DGX A100 (1x A100 40GB)](#inference-performance-nvidia-dgx-a100-1x-a100-40gb) * [Fine-tuning inference performance for SQuAD v1.1 on DGX A100 (1x A100 40GB)](#fine-tuning-inference-performance-for-squad-v1.1-on-dgx-a100-40gb) * [Inference performance: NVIDIA Tesla T4 (1x T4 16GB)](#inference-performance-nvidia-tesla-t4-1x-t4-16GB) * [Fine-tuning inference performance for SQuAD v1.1 on Tesla T4 16GB](#fine-tuning-inference-performance-for-squad-v1.1-on-tesla-t4-16GB) - [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview BERT, or Bidirectional Encoder Representations from Transformers, is a new method of pre-training language representations which obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. This model is based on the [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) paper. NVIDIA's BERT is an optimized version of [Google's official implementation](https://github.com/google-research/bert), leveraging mixed precision arithmetic and Tensor Cores on A100, V100 and T4 GPUs for faster training times while maintaining target accuracy. Other publicly available implementations of BERT include: 1. [NVIDIA PyTorch](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/BERT) 2. [Hugging Face](https://github.com/huggingface/pytorch-pretrained-BERT) 3. [codertimo](https://github.com/codertimo/BERT-pytorch) 4. [gluon-nlp](https://github.com/dmlc/gluon-nlp/tree/v0.10.x/scripts/bert) 5. [Google's official implementation](https://github.com/google-research/bert) This model is trained with mixed precision using Tensor Cores on NVIDIA Volta, Ampere and Turing GPUs. Therefore, researchers can get results up to 4x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. ### Model architecture BERT's model architecture is a multi-layer bidirectional Transformer encoder. Based on the model size, we have the following two default configurations of BERT: | **Model** | **Hidden layers** | **Hidden unit size** | **Attention heads** | **Feedforward filter size** | **Max sequence length** | **Parameters** | |:---------:|:----------:|:----:|:---:|:--------:|:---:|:----:| |BERTBASE |12 encoder| 768| 12|4 x 768|512|110M| |BERTLARGE|24 encoder|1024| 16|4 x 1024|512|330M| BERT training consists of two steps, pre-training the language model in an unsupervised fashion on vast amounts of unannotated datasets, and then using this pre-trained model for fine-tuning for various NLP tasks, such as question and answer, sentence classification, or sentiment analysis. Fine-tuning typically adds an extra layer or two for the specific task and further trains the model using a task-specific annotated dataset, starting from the pre-trained backbone weights. The end-to-end process in depicted in the following image: ![](data/images/bert_pipeline.png?raw=true) Figure 1: BERT Pipeline ### Default configuration This repository contains scripts to interactively launch data download, training, benchmarking and inference routines in a Docker container for both pre-training and fine tuning for Question Answering. The major differences between the official implementation of the paper and our version of BERT are as follows: - Mixed precision support with TensorFlow Automatic Mixed Precision (TF-AMP), which enables mixed precision training without any changes to the code-base by performing automatic graph rewrites and loss scaling controlled by an environmental variable. - Scripts to download dataset for: - Pre-training - [Wikipedia](https://dumps.wikimedia.org/), [BookCorpus](http://yknzhu.wixsite.com/mbweb) - Fine tuning - [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) (Stanford Question Answering Dataset) - Fine tuning - [GLUE](https://gluebenchmark.com/) (The General Language Understanding Evaluation benchmark) - Pretrained weights from Google - Custom fused CUDA kernels for faster computations - Multi-GPU/Multi-node support using Horovod The following performance optimizations were implemented in this model: - [XLA](https://www.tensorflow.org/xla) support (experimental). These techniques and optimizations improve model performance and reduce training time, allowing you to perform various NLP tasks with no additional effort. ### Feature support matrix The following features are supported by this model. | **Feature** | **BERT** | |:-----------------------:|:--------------------------:| | Horovod Multi-GPU | Yes | | Horovod Multi-Node | Yes | | Automatic mixed precision (AMP) | Yes | | LAMB | Yes | #### Features Multi-GPU training with Horovod - Our model uses Horovod to implement efficient multi-GPU training with NCCL. For details, see example sources in this repository or see the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage) [LAMB](https://arxiv.org/pdf/1904.00962.pdf) stands for Layerwise Adaptive Moments based optimizer, is a large batch optimization technique that helps accelerate training of deep neural networks using large minibatches. It allows using a global batch size of 65536 and 32768 on sequence lengths 128 and 512 respectively, compared to a batch size of 256 for Adam. The optimized implementation accumulates 1024 gradient batches in phase 1 and 4096 steps in phase 2 before updating weights once. This results in 27% training speedup on a single DGX2 node. On multi-node systems, LAMB allows scaling up to 1024 GPUs resulting in training speedups of up to 17x in comparison to [Adam](https://arxiv.org/pdf/1412.6980.pdf). Adam has limitations on the learning rate that can be used since it is applied globally on all parameters whereas LAMB follows a layerwise learning rate strategy. NVLAMB adds necessary tweaks to [LAMB version 1](https://arxiv.org/abs/1904.00962v1), to ensure correct convergence. A guide to implementating the LAMB optimizer can be found in our [article](https://medium.com/@NvidiaAI/a-guide-to-optimizer-implementation-for-bert-at-scale-8338cc7f45fd) on Medium.com. The algorithm is as follows: ![NVLAMB](data/images/images_nvlamb.png) ### Mixed precision training Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using [mixed precision training](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) previously required two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. This can now be achieved using Automatic Mixed Precision (AMP) for TensorFlow to enable the full [mixed precision methodology](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#tensorflow) in your existing TensorFlow model code. AMP enables mixed precision training on Volta, Turing, and NVIDIA Ampere GPU architectures automatically. The TensorFlow framework code makes all necessary model changes internally. In TF-AMP, the computational graph is optimized to use as few casts as necessary and maximize the use of FP16, and the loss scaling is automatically applied inside of supported optimizers. AMP can be configured to work with the existing tf.contrib loss scaling manager by disabling the AMP scaling with a single environment variable to perform only the automatic mixed-precision optimization. It accomplishes this by automatically rewriting all computation graphs with the necessary operations to enable mixed precision training and automatic loss scaling. For information about: - How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. - How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide. - APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/). #### Enabling mixed precision Mixed precision is enabled in TensorFlow by using the Automatic Mixed Precision (TF-AMP) extension which casts variables to half-precision upon retrieval, while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In TensorFlow, loss scaling can be applied statically by using simple multiplication of loss by a constant value or automatically, by TF-AMP. Automatic mixed precision makes all the adjustments internally in TensorFlow, providing two benefits over manual operations. First, programmers need not modify network model code, reducing development and maintenance effort. Second, using AMP maintains forward and backward compatibility with all the APIs for defining and running TensorFlow models. To enable mixed precision, you can simply add the values to the environmental variables inside your training script: - Enable TF-AMP graph rewrite: ``` os.environ["TF_ENABLE_AUTO_MIXED_PRECISION_GRAPH_REWRITE"] = "1" ``` - Enable Automated Mixed Precision: ``` os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1' #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](#https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](#https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ### Glossary **Fine-tuning** Training an already pretrained model further using a task specific dataset for subject-specific refinements, by adding task-specific layers on top if required. **Language Model** Assigns a probability distribution over a sequence of words. Given a sequence of words, it assigns a probability to the whole sequence. **Pre-training** Training a model on vast amounts of data on the same (or different) task to build general understandings. **Transformer** The paper [Attention Is All You Need](https://arxiv.org/abs/1706.03762) introduces a novel architecture called Transformer that uses an attention mechanism and transforms one sequence into another. ## Setup The following section lists the requirements in order to start training the BERT model. ### Requirements This repository contains `Dockerfile` which extends the TensorFlow NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components: - [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) - [TensorFlow 20.06-py3+](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container - GPU-based architecture: - [NVIDIA Volta](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) - [NVIDIA Turing](https://www.nvidia.com/en-us/geforce/turing/) - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: - [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) - [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry) - [Running TensorFlow](https://docs.nvidia.com/deeplearning/frameworks/tensorflow-release-notes/running.html#running) For those unable to use the TensorFlow NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). For multi-node, the sample provided in this repository requires [Enroot](https://github.com/NVIDIA/enroot) and [Pyxis](https://github.com/NVIDIA/pyxis) set up on a [SLURM](https://slurm.schedmd.com) cluster. More information on how to set up and launch can be found in the [Multi-node Documentation](https://docs.nvidia.com/ngc/multi-node-bert-user-guide). ## Quick Start Guide To pretrain or fine tune your model for Question Answering using mixed precision with Tensor Cores or using FP32/TF32, perform the following steps using the default parameters of the BERT model. 1. Clone the repository. ```bash git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/TensorFlow/LanguageModeling/BERT ``` 2. Build the BERT TensorFlow NGC container. ```bash bash scripts/docker/build.sh ``` 3. Download and preprocess the dataset. This repository provides scripts to download, verify and extract the SQuAD dataset, GLUE dataset and pretrained weights for fine tuning as well as Wikipedia and BookCorpus dataset for pre-training. To download, verify, and extract the required datasets, run: ```bash bash scripts/data_download.sh ``` The script launches a Docker container with the current directory mounted and downloads the datasets to a `data/` folder on the host. Note: For fine tuning only, Wikipedia and Bookscorpus dataset download and preprocessing can be skipped by commenting it out. - Download Wikipedia only for pretraining The pretraining dataset is 170GB+ and takes 15+ hours to download. The BookCorpus server most of the times get overloaded and also contain broken links resulting in HTTP 403 and 503 errors. Hence, it is recommended to skip downloading BookCorpus data by running: `bash scripts/data_download.sh wiki_only` - Download Wikipedia and BookCorpus Users are welcome to download BookCorpus from other sources to match our accuracy, or repeatedly try our script until the required number of files are downloaded by running the following: `bash scripts/data_download.sh wiki_books` Note: Ensure a complete Wikipedia download. If in any case, the download breaks, remove the output file `wikicorpus_en.xml.bz2` and start again. If a partially downloaded file exists, the script assumes successful download which causes the extraction to fail. Not using BookCorpus can potentially change final accuracy on a few downstream tasks. 4. Download the pretrained models from NGC. We have uploaded checkpoints that have been fine tuned and pre-trained for various configurations on the NGC Model Registry. Our data download scripts, by default download some of them but you can browse and download the relevant checkpoints directly from the [NGC model catalog](https://ngc.nvidia.com/catalog/models). Download them to the `data/download/nvidia_pretrained/` to easily access them in your scripts. 5. Start an interactive session in the NGC container to run training/inference. After you build the container image and download the data, you can start an interactive CLI session as follows: ```bash bash scripts/docker/launch.sh ``` The `launch.sh` script assumes that the datasets are in the following locations by default after downloading the data. - SQuAD v1.1 - `data/download/squad/v1.1` - SQuAD v2.0 - `data/download/squad/v2.0` - GLUE The Corpus of Linguistic Acceptability (CoLA) - `data/download/CoLA` - GLUE Microsoft Research Paraphrase Corpus (MRPC) - `data/download/MRPC` - GLUE The Multi-Genre NLI Corpus (MNLI) - `data/download/MNLI` - BERT Large - `data/download/google_pretrained_weights/uncased_L-24_H-1024_A-16` - BERT Base - `data/download/google_pretrained_weights/uncased_L-12_H-768_A-12` - BERT - `data/download/google_pretrained_weights/` - Wikipedia + BookCorpus TFRecords - `data/tfrecords<config>/books_wiki_en_corpus` 6. Start pre-training. BERT is designed to pre-train deep bidirectional representations for language representations. The following scripts are to replicate pre-training on Wikipedia and BookCorpus from the [LAMB paper](https://arxiv.org/pdf/1904.00962.pdf). These scripts are general and can be used for pre-training language representations on any corpus of choice. From within the container, you can use the following script to run pre-training using LAMB. ```bash bash scripts/run_pretraining_lamb.sh <train_batch_size_phase1> <train_batch_size_phase2> <eval_batch_size> <learning_rate_phase1> <learning_rate_phase2> <precision> <use_xla> <num_gpus> <warmup_steps_phase1> <warmup_steps_phase2> <train_steps> <save_checkpoint_steps> <num_accumulation_phase1> <num_accumulation_steps_phase2> <bert_model> ``` For BERT Large FP16 training with XLA using a DGX-1 V100 32GB, run: ```bash bash scripts/run_pretraining_lamb.sh 64 8 8 7.5e-4 5e-4 fp16 true 8 2000 200 7820 100 128 512 large ``` This repository also contains a number of predefined configurations to run the Lamb pretraining on NVIDIA DGX-1, NVIDIA DGX-2H or NVIDIA DGX A100 nodes in `scripts/configs/pretrain_config.sh`. For example, to use the default DGX A100 8 gpu config, run: ```bash bash scripts/run_pretraining_lamb.sh $(source scripts/configs/pretrain_config.sh && dgxa100_8gpu_fp16) ``` Alternatively, to run pre-training with Adam as in the original [BERT paper](https://arxiv.org/pdf/1810.04805.pdf) from within the container, run: ```bash bash scripts/run_pretraining_adam.sh <train_batch_size_per_gpu> <eval_batch_size> <learning_rate_per_gpu> <precision> <use_xla> <num_gpus> <warmup_steps> <train_steps> <save_checkpoint_steps> ``` 7. Start fine tuning. The above pretrained BERT representations can be fine tuned with just one additional output layer for a state-of-the-art Question Answering system. From within the container, you can use the following script to run fine-training for SQuAD. ```bash bash scripts/run_squad.sh <batch_size_per_gpu> <learning_rate_per_gpu> <precision> <use_xla> <num_gpus> <seq_length> <doc_stride> <bert_model> <squad_version> <checkpoint> <epochs> ``` For SQuAD 1.1 FP16 training with XLA using a DGX A100 40GB, run: ```bash bash scripts/run_squad.sh 32 5e-6 fp16 true 8 384 128 large 1.1 data/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/bert_model.ckpt 2.0 ``` This repository contains a number of predefined configurations to run the SQuAD fine tuning on NVIDIA DGX-1, NVIDIA DGX-2H or NVIDIA DGX A100 nodes in `scripts/configs/squad_config.sh`. For example, to use the default DGX A100 8 gpu config, run: ```bash bash scripts/run_squad.sh $(source scripts/configs/squad_config.sh && dgxa100_8gpu_fp16) 1.1 data/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/bert_model.ckpt 2.0 ``` Alternatively, to run fine tuning on GLUE benchmark, run: ```bash bash scripts/run_glue.sh <task_name> <batch_size_per_gpu> <learning_rate_per_gpu> <precision> <use_xla> <num_gpus> <seq_length> <doc_stride> <bert_model> <epochs> <warmup_proportion> <checkpoint> ``` For MRPC FP16 training with XLA using a DGX A100 40GB, run: ```bash bash scripts/run_glue.sh MRPC 16 3e-6 true 8 128 64 large 3 0.1 ``` The GLUE tasks supported include CoLA, MRPC and MNLI. 8. Start validation/evaluation. The `run_squad_inference.sh` script runs inference on a checkpoint fine tuned for SQuAD and evaluates the validity of predictions on the basis of exact match and F1 score. ```bash bash scripts/run_squad_inference.sh <init_checkpoint> <batch_size> <precision> <use_xla> <seq_length> <doc_stride> <bert_model> <squad_version> ``` For SQuAD 2.0 FP16 inference with XLA using a DGX-1 V100 32GB using checkpoint at `/results/model.ckpt` , run: ```bash bash scripts/run_squad_inference.sh /results/model.ckpt 8 fp16 true 384 128 large 2.0 ``` For SQuAD 1.1 FP32 inference without XLA using a DGX A100 40GB using checkpoint at `/results/model.ckpt`, run: ```bash bash scripts/run_squad_inference.sh /results/model.ckpt 8 fp32 false 384 128 large 1.1 ``` Alternatively, to run inference on GLUE benchmark, run: ```bash bash scripts/run_glue_inference.sh <task_name> <init_checkpoint> <batch_size_per_gpu> <precision> <use_xla> <seq_length> <doc_stride> <bert_model> ``` ## Advanced The following sections provide greater details of the dataset, running training and inference, and the training results. ### Scripts and sample code In the root directory, the most important files are: * `run_pretraining.py` - Serves as entry point for pre-training * `run_squad.py` - Serves as entry point for SQuAD training * `run_classifier.py` - Serves as entry point for GLUE training * `Dockerfile` - Container with the basic set of dependencies to run BERT The `scripts/` folder encapsulates all the one-click scripts required for running various functionalities supported such as: * `run_squad.sh` - Runs SQuAD training and inference using `run_squad.py` file * `run_glue.sh` - Runs GLUE training and inference using the `run_classifier.py` file * `run_pretraining_adam.sh` - Runs pre-training with Adam optimizer using the `run_pretraining.py` file * `run_pretraining_lamb.sh` - Runs pre-training with LAMB optimizer using the `run_pretraining.py` file in two phases. Phase 1 does 90% of training with sequence length = 128. In phase 2, the remaining 10% of the training is done with sequence length = 512. * `data_download.sh` - Downloads datasets using files in the `data/` folder * `finetune_train_benchmark.sh` - Captures performance metrics of training for multiple configurations * `finetune_inference_benchmark.sh` - Captures performance metrics of inference for multiple configurations Other folders included in the root directory are: * `data/` - Necessary folders and scripts to download datasets required for fine tuning and pre-training BERT. * `utils/` - Necessary files for preprocessing data before feeding into BERT and hooks for obtaining performance metrics from BERT. ### Parameters Aside from the options to set hyperparameters, the relevant options to control the behaviour of the `run_pretraining.py` script are: ``` --bert_config_file: The config json file corresponding to the pre-trained BERT model. This specifies the model architecture. --init_checkpoint: Initial checkpoint (usually from a pre-trained BERT model). --[no]do_eval: Whether to run evaluation on the dev set.(default: 'false') --[no]do_train: Whether to run training.(evaluation: 'false') --eval_batch_size: Total batch size for eval.(default: '8')(an integer) --[no]horovod: Whether to use Horovod for multi-gpu runs(default: 'false') --[no]amp: Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.(default: 'True') --[no]use_xla: Whether to enable XLA JIT compilation.(default: 'True') --input_files_dir: Input TF example files (can be a dir or comma separated). --output_dir: The output directory where the model checkpoints will be written. --optimizer_type: Optimizer used for training - LAMB or ADAM --num_accumulation_steps: Number of accumulation steps before gradient update. Global batch size = num_accumulation_steps * train_batch_size --allreduce_post_accumulation: Whether to all reduce after accumulation of N steps or after each step ``` Aside from the options to set hyperparameters, some relevant options to control the behaviour of the `run_squad.py` script are: ``` --bert_config_file: The config json file corresponding to the pre-trained BERT model. This specifies the model architecture. --output_dir: The output directory where the model checkpoints will be written. --[no]do_predict: Whether to run evaluation on the dev set. (default: 'false') --[no]do_train: Whether to run training. (default: 'false') --learning_rate: The initial learning rate for Adam.(default: '5e-06')(a number) --max_answer_length: The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.(default: '30')(an integer) --max_query_length: The maximum number of tokens for the question. Questions longer than this will be truncated to this length.(default: '64')(an integer) --max_seq_length: The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.(default: '384')(an integer) --predict_batch_size: Total batch size for predictions.(default: '8')(an integer) --train_batch_size: Total batch size for training.(default: '8')(an integer) --[no]amp: Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.(default: 'True') --[no]use_xla: Whether to enable XLA JIT compilation.(default: 'True') --[no]version_2_with_negative: If true, the SQuAD examples contain some that do not have an answer.(default: 'false') ``` Aside from the options to set hyperparameters, some relevant options to control the behaviour of the `run_classifier.py` script are: ``` --bert_config_file: The config json file corresponding to the pre-trained BERT model. This specifies the model architecture. --data_dir: The input data dir. Should contain the .tsv files (or other data files) for the task. --[no]do_eval: Whether to run eval on the dev set. (default: 'false') --[no]do_predict: Whether to run the model in inference mode on the test set.(default: 'false') --[no]do_train: Whether to run training.(default: 'false') --[no]horovod: Whether to use Horovod for multi-gpu runs(default: 'false') --init_checkpoint: Initial checkpoint (usually from a pre-trained BERT model). --max_seq_length: The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.(default: '128')(an integer) --num_train_epochs: Total number of training epochs to perform.(default: '3.0')(a number) --output_dir: The output directory where the model checkpoints will be written. --task_name: The name of the task to train. --train_batch_size: Total batch size for training.(default: '32')(an integer) --[no]amp: Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.(default: 'True') --[no]use_xla: Whether to enable XLA JIT compilation.(default: 'True') --vocab_file: The vocabulary file that the BERT model was trained on. --warmup_proportion: Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% of training.(default: '0.1')(a number) ``` Note: When initializing from a checkpoint using `--init_checkpoint` and a corpus of your choice, keep in mind that `bert_config_file` and `vocab_file` should remain unchanged. ### Command-line options To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option with the Python file, for example: ```bash python run_pretraining.py --help python run_squad.py --help python run_classifier.py --help ``` ### Getting the data For pre-training BERT, we use the concatenation of Wikipedia (2500M words) as well as BookCorpus (800M words). For Wikipedia, we extract only the text passages from [here](ftp://ftpmirror.your.org/pub/wikimedia/dumps/enwiki/latest/enwiki-latest-pages-articles-multistream.xml.bz2) and ignore headers list and tables. It is structured as a document level corpus rather than a shuffled sentence level corpus because it is critical to extract long contiguous sentences. The next step is to run `create_pretraining_data.py` with the document level corpus as input, which generates input data and labels for the masked language modeling and next sentence prediction tasks. Pre-training can also be performed on any corpus of your choice. The collection of data generation scripts are intended to be modular to allow modifications for additional preprocessing steps or to use additional data. They can hence easily be modified for an arbitrary corpus. The preparation of an individual pre-training dataset is described in the `create_datasets_from_start.sh` script found in the `data/` folder. The component steps to prepare the datasets are as follows: 1. Data download and extract - the dataset is downloaded and extracted. 2. Clean and format - document tags, etc. are removed from the dataset. The end result of this step is a `{dataset_name_one_article_per_line}.txt` file that contains the entire corpus. Each line in the text file contains an entire document from the corpus. One file per dataset is created in the `formatted_one_article_per_line` folder. 3. Sharding - the sentence segmented corpus file is split into a number of smaller text documents. The sharding is configured so that a document will not be split between two shards. Sentence segmentation is performed at this time using NLTK. 4. TFRecord file creation - each text file shard is processed by the `create_pretraining_data.py` script to produce a corresponding TFRecord file. The script generates input data and labels for masked language modeling and sentence prediction tasks for the input text shard. For fine tuning BERT for the task of Question Answering, we use SQuAD and GLUE. SQuAD v1.1 has 100,000+ question-answer pairs on 500+ articles. SQuAD v2.0 combines v1.1 with an additional 50,000 new unanswerable questions and must not only answer questions but also determine when that is not possible. GLUE consists of single-sentence tasks, similarity and paraphrase tasks and inference tasks. We support one of each: CoLA, MNLI and MRPC. #### Dataset guidelines The procedure to prepare a text corpus for pre-training is described in the previous section. This section provides additional insight into how exactly raw text is processed so that it is ready for pre-training. First, raw text is tokenized using [WordPiece tokenization](https://arxiv.org/pdf/1609.08144.pdf). A [CLS] token is inserted at the start of every sequence, and the two sentences in the sequence are separated by a [SEP] token. Note: BERT pre-training looks at pairs of sentences at a time. A sentence embedding token [A] is added to the first sentence and token [B] to the next. BERT pre-training optimizes for two unsupervised classification tasks. The first is Masked Language Modelling (Masked LM). One training instance of Masked LM is a single modified sentence. Each token in the sentence has a 15% chance of being replaced by a [MASK] token. The chosen token is replaced with [MASK] 80% of the time, 10% with another random token and the remaining 10% with the same token. The task is then to predict the original token. The second task is next sentence prediction. One training instance of BERT pre-training is two sentences (a sentence pair). A sentence pair may be constructed by simply taking two adjacent sentences from a single document, or by pairing up two random sentences with equal probability. The goal of this task is to predict whether or not the second sentence followed the first in the original document. The `create_pretraining_data.py` script takes in raw text and creates training instances for both pre-training tasks. #### Multi-dataset We are able to combine multiple datasets into a single dataset for pre-training on a diverse text corpus. Once TFRecords have been created for each component dataset, you can create a combined dataset by adding the directory to `SOURCES` in `run_pretraining_*.sh`. This will feed all matching files to the input pipeline in `run_pretraining.py`. However, in the training process, only one TFRecord file is consumed at a time, therefore, the training instances of any given training batch will all belong to the same source dataset. ### Training process The training process consists of two steps: pre-training and fine tuning. #### Pre-training Pre-training is performed using the `run_pretraining.py` script along with parameters defined in the `scripts/run_pretraining_lamb.sh`. The `run_pretraining_lamb.sh` script runs a job on a single node that trains the BERT-large model from scratch using the Wikipedia and BookCorpus datasets as training data. By default, the training script: - Runs on 8 GPUs. - Has FP16 precision enabled. - Is XLA enabled. - Creates a log file containing all the output. - Saves a checkpoint every 100 iterations (keeps only the latest checkpoint) and at the end of training. All checkpoints, evaluation results and training logs are saved to the `/results` directory (in the container which can be mounted to a local directory). - Evaluates the model at the end of each phase. - Phase 1 - Runs 7038 steps with 2000 warmup steps - Sets Maximum sequence length as 128 - Sets Global Batch size as 64K - Phase 2 - Runs 1564 steps with 200 warm-up steps - Sets Maximum sequence length as 512 - Sets Global Batch size as 32K - Starts from Phase1's final checkpoint These parameters train Wikipedia and BookCorpus with reasonable accuracy on a DGX-1 with 32GB V100 cards. For example: ```bash scripts/run_pretraining_lamb.sh <train_batch_size_phase1> <train_batch_size_phase2> <eval_batch_size> <learning_rate_phase1> <learning_rate_phase2> <precision> <use_xla> <num_gpus> <warmup_steps_phase1> <warmup_steps_phase2> <train_steps> <save_checkpoint_steps> <num_accumulation_phase1> <num_accumulation_steps_phase2> <bert_model> ``` Where: - `<training_batch_size_phase*>` is per-GPU batch size used for training in the respective phase. Batch size varies with precision, larger batch sizes run more efficiently, but require more memory. - `<eval_batch_size>` is per-GPU batch size used for evaluation after training. - `<learning_rate_phase1>` is the default rate of 1e-4 is good for global batch size 256. - `<learning_rate_phase2>` is the default rate of 1e-4 is good for global batch size 256. - `<precision>` is the type of math in your model, can be either `fp32` or `fp16`. Specifically: - `fp32` is 32-bit IEEE single precision floats. Is enabled by default on V100. - `fp16` is Automatic rewrite of TensorFlow compute graph to take advantage of 16-bit arithmetic whenever it is safe. - `tf32` uses same 10 bit mantissa as fp16 and 8 bit exponent as fp32. Is enabled by default on A100. - `<num_gpus>` is the number of GPUs to use for training. Must be equal to or smaller than the number of GPUs attached to your node. - `<warmup_steps_phase*>` is the number of warm-up steps at the start of training in the respective phase. - `<training_steps>` is the total number of training steps in both phases combined. - `<save_checkpoint_steps>` controls how often checkpoints are saved. Default is 100 steps. - `<num_accumulation_phase*>` is used to mimic higher batch sizes in the respective phase by accumulating gradients N times before weight update. - `<bert_model>` is used to indicate whether to pretrain BERT Large or BERT Base model The following sample code trains BERT-large from scratch on a single DGX-2 using FP16 arithmetic. This will take around 4.5 days. ```bash bert_tf/scripts/run_pretraining_lamb.sh 32 8 8 3.75e-4 2.5e-4 fp16 true 16 2000 200 7820 100 128 256 large ``` #### Fine tuning Fine tuning is performed using the `run_squad.py` script along with parameters defined in `scripts/run_squad.sh`. The `run_squad.sh` script trains a model and performs evaluation on the SQuAD dataset. By default, the training script: - Trains for SQuAD v1.1 dataset. - Trains on BERT Large Model. - Uses 8 GPUs and batch size of 10 on each GPU. - Has FP16 precision enabled. - Is XLA enabled. - Runs for 2 epochs. - Saves a checkpoint every 1000 iterations (keeps only the latest checkpoint) and at the end of training. All checkpoints, evaluation results and training logs are saved to the `/results` directory (in the container which can be mounted to a local directory). - Evaluation is done at the end of training. To skip evaluation, modify `--do_predict` to `False`. This script outputs checkpoints to the `/results` directory, by default, inside the container. Mount point of `/results` can be changed in the `scripts/docker/launch.sh` file. The training log contains information about: - Loss for the final step - Training and evaluation performance - F1 and exact match score on the Dev Set of SQuAD after evaluation. The summary after training is printed in the following format: ```bash I0312 23:10:45.137036 140287431493376 run_squad.py:1332] 0 Total Training Time = 3007.00 Training Time W/O start up overhead = 2855.92 Sentences processed = 175176 I0312 23:10:45.137243 140287431493376 run_squad.py:1333] 0 Training Performance = 61.3378 sentences/sec I0312 23:14:00.550846 140287431493376 run_squad.py:1396] 0 Total Inference Time = 145.46 Inference Time W/O start up overhead = 131.86 Sentences processed = 10840 I0312 23:14:00.550973 140287431493376 run_squad.py:1397] 0 Inference Performance = 82.2095 sentences/sec {"exact_match": 83.69914853358561, "f1": 90.8477003317459} ``` Multi-GPU training is enabled with the Horovod TensorFlow module. The following example runs fine tuning on 8 GPUs: ```bash BERT_DIR=data/download/google_pretrained_weights/uncased_L-24_H-1024_A-16 SQUAD_DIR=data/download/squad/v1.1 mpi_command="mpirun -np 8 -H localhost:8 \ --allow-run-as-root -bind-to none -map-by slot \ -x NCCL_DEBUG=INFO \ -x LD_LIBRARY_PATH \ -x PATH -mca pml ob1 -mca btl ^openib" \ python run_squad.py --horovod --vocab_file=$BERT_DIR/vocab.txt \ --bert_config_file=$BERT_DIR/bert_config.json \ --output_dir=/results --do_train --train_file=$SQUAD_DIR/train-v1.1.json ``` #### Multi-node Multi-node runs can be launched on a pyxis/enroot Slurm cluster (see [Requirements](#requirements)) with the `run.sub` script with the following command for a 4-node DGX1 example for both phase 1 and phase 2: ``` BATCHSIZE=16 LEARNING_RATE='1.875e-4' NUM_ACCUMULATION_STEPS=128 PHASE=1 sbatch -N4 --ntasks-per-node=8 run.sub BATCHSIZE=2 LEARNING_RATE='1.25e-4' NUM_ACCUMULATION_STEPS=512 PHASE=1 sbatch -N4 --ntasks-per-node=8 run.sub ``` Checkpoint after phase 1 will be saved in `checkpointdir` specified in `run.sub`. The checkpoint will be automatically picked up to resume training on phase 2. Note that phase 2 should be run after phase 1. Variables to re-run the [Training performance results](#training-performance-results) are available in the `scripts/configs/configurations.yml` file. The batch variables `BATCHSIZE`, `LEARNING_RATE`, `NUM_ACCUMULATION_STEPS` refer to the Python arguments `train_batch_size`, `learning_rate`, `num_accumulation_steps` respectively. The variable `PHASE` refers to phase specific arguments available in `run.sub`. Note that the `run.sub` script is a starting point that has to be adapted depending on the environment. In particular, variables such as `datadir` handle the location of the files for each phase. Refer to the files contents to see the full list of variables to adjust for your system. ### Inference process Inference on a fine tuned Question Answering system is performed using the `run_squad.py` script along with parameters defined in `scripts/run_squad_inference.sh`. Inference is supported on a single GPU. The `run_squad_inference.sh` script trains a model and performs evaluation on the SQuAD dataset. By default, the inferencing script: - Uses SQuAD v1.1 dataset - Has FP16 precision enabled - Is XLA enabled - Evaluates the latest checkpoint present in `/results` with a batch size of 8 This script outputs predictions file to `/results/predictions.json` and computes F1 score and exact match score using SQuAD's evaluate file. Mount point of `/results` can be changed in the `scripts/docker/launch.sh` file. The output log contains information about: Inference performance Inference Accuracy (F1 and exact match scores) on the Dev Set of SQuAD after evaluation. The summary after inference is printed in the following format: ```bash I0312 23:14:00.550846 140287431493376 run_squad.py:1396] 0 Total Inference Time = 145.46 Inference Time W/O start up overhead = 131.86 Sentences processed = 10840 I0312 23:14:00.550973 140287431493376 run_squad.py:1397] 0 Inference Performance = 82.2095 sentences/sec {"exact_match": 83.69914853358561, "f1": 90.8477003317459} ``` ### Inference Process With TensorRT NVIDIA TensorRT is a platform for high-performance deep learning inference. It includes a deep learning inference optimizer and runtime that delivers low latency and high-throughput for deep learning inference applications. More information on how to perform inference using TensorRT can be found in the subfolder [./trt/README.md](trt/README.md) ### Deploying the BERT model using Triton Inference Server The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server) provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or gRPC endpoint, allowing remote clients to request inferencing for any number of GPU or CPU models being managed by the server. More information on how to perform inference using `Triton Inference Server` can be found in the subfolder `./triton/README.md`. ### BioBERT Many works, including [BioBERT](https://arxiv.org/pdf/1901.08746.pdf), [SciBERT](https://arxiv.org/pdf/1903.10676.pdf), [NCBI-BERT](https://arxiv.org/pdf/1906.05474.pdf), [ClinicalBERT (MIT)](https://arxiv.org/pdf/1904.03323.pdf), [ClinicalBERT (NYU, Princeton)](https://arxiv.org/pdf/1904.05342.pdf), and others at [BioNLP’19 workshop](https://aclweb.org/aclwiki/BioNLP_Workshop), show that pre-training of BERT on large biomedical text corpus such as [PubMed](https://www.ncbi.nlm.nih.gov/pubmed/) results in better performance in biomedical text-mining tasks. More information on how to download a biomedical corpus and pre-train as well as finetune for biomedical tasks can be found in the subfolder `./biobert/README.md`. ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Benchmarking The following section shows how to run benchmarks measuring the model performance in training and inference modes. Both of these benchmarking scripts enable you to run a number of epochs, extract performance numbers, and run the BERT model for fine tuning. #### Training performance benchmark Training benchmarking can be performed by running the script: ``` bash scripts/finetune_train_benchmark.sh <bert_model> <use_xla> <num_gpu> squad ``` This script runs 2 epochs by default on the SQuAD v1.1 dataset and extracts performance numbers for various batch sizes and sequence lengths in both FP16 and FP32/TF32. These numbers are saved at `/results/squad_train_benchmark_bert_<bert_model>_gpu_<num_gpu>.log`. #### Inference performance benchmark Inference benchmarking can be performed by running the script: ``` bash scripts/finetune_inference_benchmark.sh squad ``` This script runs 1024 eval iterations by default on the SQuAD v1.1 dataset and extracts performance and latency numbers for various batch sizes and sequence lengths in both FP16 and FP32/TF32, for base and large models. These numbers are saved at `/results/squad_inference_benchmark_bert_<bert_model>.log`. ### Results The following sections provide details on how we achieved our performance and accuracy in training and inference for pre-training using LAMB optimizer as well as fine tuning for Question Answering. All results are on BERT-large model unless otherwise mentioned. All fine tuning results are on SQuAD v1.1 using a sequence length of 384 unless otherwise mentioned. #### Training accuracy results ##### Training accuracy ###### Pre-training accuracy Our results were obtained by running the `scripts/run_pretraining_lamb.sh` training script in the TensorFlow 20.06-py3 NGC container. | **DGX System** | **Nodes x GPUs** | **Precision** | **Batch Size/GPU: Phase1, Phase2** | **Accumulation Steps: Phase1, Phase2** | **Time to Train (Hrs)** | **Final Loss** | |----------------|-----------|---------------|------------------------------------|----------------------------------------|----------------|-------------------------| | DGX2H | 32 x 16 | FP16 | 64, 8 | 2, 8 | 2.63 | 1.59 | | DGX2H | 32 x 16 | FP32 | 32, 8 | 4, 8 | 8.48 | 1.56 | | DGXA100 | 32 x 8 | FP16 | 64, 16 | 4, 8 | 3.24 | 1.56 | | DGXA100 | 32 x 8 | TF32 | 64, 8 | 4, 16 | 4.58 | 1.58 | Note: Time to train includes upto 16 minutes of start up time for every restart (atleast once for each phase). Experiments were run on clusters with a maximum wall clock time of 8 hours. ###### Fine-tuning accuracy for SQuAD v1.1: NVIDIA DGX A100 (8x A100 40G) Our results were obtained by running the `scripts/run_squad.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX A100 with 8x A100 40GB GPUs. | **GPUs** | **Batch size / GPU: TF32, FP16 ** | **Accuracy - TF32** | **Accuracy - mixed precision** | **Time to Train - TF32 (Hrs)** | **Time to Train - mixed precision (Hrs)** | |:---:|:----:|:----:|:---:|:----:|:----:| | 8 | 16, 24 |91.41 |91.52 |0.26|0.26| ###### Fine-tuning accuracy for GLUE MRPC: NVIDIA DGX A100 (8x A100 40G) Our results were obtained by running the `scripts/run_glue.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX A100 with 8x A100 40GB GPUs for 10 different seeds and picking the maximum accuracy on MRPC dev set. | **GPUs** | **Batch size / GPU** | **Accuracy - TF32** | **Accuracy - mixed precision** | **Time to Train - TF32 (Hrs)** | **Time to Train - mixed precision (Hrs)** | **Throughput - TF32** | **Throughput - mixed precision ** | |:---:|:----:|:----:|:---:|:----:|:----:|:----:|:----:| | 8 | 16 | 87.99 | 87.09 |0.009 | 0.009 |357.91|230.16| ##### Training stability test ###### Pre-training SQuAD v1.1 stability test: NVIDIA DGX A100 (256x A100 40GB) The following tables compare `Final Loss` scores across 2 different training runs with different seeds, for both FP16 and TF32. The runs showcase consistent convergence on all 2 seeds with very little deviation. | **FP16, 256x GPUs** | **seed 1** | **seed 2** | **mean** | **std** | |:-----------:|:-----:|:-----:|:-----:|:-----:| |Final Loss |1.570 |1.561 |1.565 |0.006 | | **TF32, 256x GPUs** | **seed 1** | **seed 2** | **mean** | **std** | |:-----------:|:-----:|:-----:|:-----:|:-----:| |Final Loss |1.583 |1.582 |1.582 |0.0007 | ###### Fine-tuning SQuAD v1.1 stability test: NVIDIA DGX A100 (8x A100 40GB) The following tables compare `F1` scores across 5 different training runs with different seeds, for both FP16 and TF32 respectively using (Nvidia's Pretrained Checkpoint)[https://ngc.nvidia.com/catalog/models/nvidia:bert_tf_pretraining_lamb_16n]. The runs showcase consistent convergence on all 5 seeds with very little deviation. | **FP16, 8x GPUs** | **seed 1** | **seed 2** | **seed 3** | **seed 4** | **seed 5** | **mean** | **std** | |:-----------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:| |F1 |91.61|91.04|91.59|91.32|91.52|91.41|0.24| | **TF32, 8x GPUs** | **seed 1** | **seed 2** | **seed 3** | **seed 4** | **seed 5** | **mean** | **std** | |:-----------:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:|:-----:| |F1 |91.50|91.49|91.64|91.29|91.67|91.52|0.15 | ###### Fine-tuning GLUE MRPC stability test: NVIDIA DGX A100 (8x A100 40GB) The following tables compare `F1` scores across 10 different training runs with different seeds, for both FP16 and TF32 respectively using (Nvidia's Pretrained Checkpoint)[https://ngc.nvidia.com/catalog/models/nvidia:bert_tf_pretraining_lamb_16n]. The runs showcase consistent convergence on all 10 seeds with very little deviation. | ** FP16, 8 GPUs ** | ** seed 1 ** | ** seed 2 ** | ** seed 3 ** | ** seed 4 ** | ** seed 5 ** | ** seed 6 ** | ** seed 7 ** | ** seed 8 ** | ** seed 9 ** | ** seed 10 ** | ** Mean ** | ** Std ** | |--------------------|--------------|--------------|--------------|--------------|--------------|--------------|--------------|--------------|--------------|---------------|-------------|-------------| | Eval Accuracy | 84.31372643 | 85.78431606 | 86.76471114 | 87.00980544 | 86.27451062 | 86.27451062 | 85.5392158 | 86.51961088 | 86.27451062 | 85.2941215 | 86.00490391 | 0.795887906 | | ** TF32, 8 GPUs ** | ** seed 1 ** | ** seed 2 ** | ** seed 3 ** | ** seed 4 ** | ** seed 5 ** | ** seed 6 ** | ** seed 7 ** | ** seed 8 ** | ** seed 9 ** | ** seed 10 ** | ** Mean ** | ** Std ** | |--------------------|--------------|--------------|--------------|--------------|--------------|--------------|--------------|--------------|--------------|---------------|------------|--------------| | Eval Accuracy | 87.00980544 | 86.27451062 | 87.99020052 | 86.27451062 | 86.02941632 | 87.00980544 | 86.27451062 | 86.51961088 | 87.74510026 | 86.02941632 | 86.7156887 | 0.7009024515 | #### Training performance results ##### Training performance: NVIDIA DGX-1 (8x V100 16GB) ###### Pre-training training performance: single-node on DGX-1 16GB Our results were obtained by running the `scripts/run_pretraining_lamb.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX-1 with 8x V100 16GB GPUs. Performance (in sentences per second) is the steady state throughput. | **GPUs** | **Sequence Length** | **Batch size / GPU: mixed precision, FP32** | **Gradient Accumulation: mixed precision, FP32** | **Global Batch Size** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - FP32** | |:--------:|:-------------------:|:-------------------------------------------:|--------------------------------------------------|:---------------------:|:--------------------------------:|-----------------------|-------------------------------------------------|------------------------------------|-------------------------| | 1 | 128 | 16 , 8 | 4096, 8192 | 65536 | 134.34 | 39.43 | 3.41 | 1.00 | 1.00 | | 4 | 128 | 16 , 8 | 1024, 2048 | 65536 | 449.68 | 152.33 | 2.95 | 3.35 | 3.86 | | 8 | 128 | 16 , 8 | 512, 1024 | 65536 | 1001.39 | 285.79 | 3.50 | 7.45 | 7.25 | | 1 | 512 | 4 , 2 | 8192, 16384 | 32768 | 28.72 | 9.80 | 2.93 | 1.00 | 1.00 | | 4 | 512 | 4 , 2 | 2048, 4096 | 32768 | 109.96 | 35.32 | 3.11 | 3.83 | 3.60 | | 8 | 512 | 4 , 2 | 1024, 2048 | 32768 | 190.65 | 69.53 | 2.74 | 6.64 | 7.09 | Note: The respective values for FP32 runs that use a batch size of 16, 4 in sequence lengths 128 and 512 respectively are not available due to out of memory errors that arise. ###### Fine-tuning training performance for SQuAD v1.1 on DGX-1 16GB Our results were obtained by running the `scripts/run_squad.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX-1 with 8x V100 16GB GPUs. Performance (in sentences per second) is the steady state throughput. | **GPUs** | **Batch size / GPU: mixed precision, FP32** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 to mixed precision)** | **Weak scaling - FP32** | **Weak scaling - mixed precision** | |----------|---------------------------------------------|----------------------------------|-----------------------|--------------------------------------------------|-------------------------|------------------------------------| | 1 | 4,2 | 29.74 | 7.36 | 4.04 | 1.00 | 1.00 | | 4 | 4,2 | 97.28 | 26.64 | 3.65 | 3.27 | 3.62 | | 8 | 4,2 | 189.77 | 52.39 | 3.62 | 6.38 | 7.12 | Note: The respective values for FP32 runs that use a batch size of 4 are not available due to out of memory errors that arise. To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above. ##### Training performance: NVIDIA DGX-1 (8x V100 32GB) ###### Pre-training training performance: single-node on DGX-1 32GB Our results were obtained by running the `scripts/run_pretraining_lamb.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX-1 with 8x V100 32GB GPUs. Performance (in sentences per second) is the steady state throughput. | **GPUs** | **Sequence Length** | **Batch size / GPU: mixed precision, FP32** | **Gradient Accumulation: mixed precision, FP32** | **Global Batch Size** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - FP32** | |:--------:|:-------------------:|:-------------------------------------------:|--------------------------------------------------|:---------------------:|:--------------------------------:|-----------------------|-------------------------------------------------|------------------------------------|-------------------------| | 1 | 128 | 64 , 32 | 1024, 2048 | 65536 | 168.63 | 46.78 | 3.60 | 1.00 | 1.00 | | 4 | 128 | 64 , 32 | 256, 512 | 65536 | 730.25 | 179.73 | 4.06 | 4.33 | 3.84 | | 8 | 128 | 64 , 32 | 128, 256 | 65536 | 1443.05 | 357.00 | 4.04 | 8.56 | 7.63 | | 1 | 512 | 8 , 8 | 4096, 4096 | 32768 | 31.23 | 10.67 | 2.93 | 1.00 | 1.00 | | 4 | 512 | 8 , 8 | 1024, 1024 | 32768 | 118.84 | 39.55 | 3.00 | 3.81 | 3.71 | | 8 | 512 | 8 , 8 | 512, 512 | 32768 | 255.64 | 81.42 | 3.14 | 8.19 | 7.63 | Note: The respective values for FP32 runs that use a batch size of 64 in sequence lengths 128 are not available due to out of memory errors that arise. ###### Fine-tuning training performance for SQuAD v1.1 on DGX-1 32GB Our results were obtained by running the `scripts/run_squad.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX-1 with 8x V100 32GB GPUs. Performance (in sentences per second) is the steady state throughput. | **GPUs** | **Batch size / GPU: mixed precision, FP32** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 to mixed precision)** | **Weak scaling - FP32** | **Weak scaling - mixed precision** | |----------|---------------------------------------------|----------------------------------|-----------------------|--------------------------------------------------|-------------------------|------------------------------------| | 1 | 24, 10 | 51.02 | 10.42 | 4.90 | 1.00 | 1.00 | | 4 | 24, 10 | 181.37 | 39.77 | 4.56 | 3.55 | 3.82 | | 8 | 24, 10 | 314.6 | 79.37 | 3.96 | 6.17 | 7.62 | Note: The respective values for FP32 runs that use a batch size of 24 are not available due to out of memory errors that arise. To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above. ##### Training performance: NVIDIA DGX-2 (16x V100 32GB) ###### Pre-training training performance: single-node on DGX-2 32GB Our results were obtained by running the `scripts/run_pretraining_lamb.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX-2 with 16x V100 32GB GPUs. Performance (in sentences per second) is the steady state throughput. | **GPUs** | **Sequence Length** | **Batch size / GPU: mixed precision, FP32** | **Gradient Accumulation: mixed precision, FP32** | **Global Batch Size** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - FP32** | |:--------:|:-------------------:|:-------------------------------------------:|--------------------------------------------------|:---------------------:|:--------------------------------:|:---------------------:|-------------------------------------------------|------------------------------------|-------------------------| | 1 | 128 | 64 , 32 | 1024 , 8192 | 65536 | 188.04 | 35.32 | 5.32 | 1.00 | 1.00 | | 4 | 128 | 64 , 32 | 256 , 2048 | 65536 | 790.89 | 193.08 | 4.10 | 4.21 | 5.47 | | 8 | 128 | 64 , 32 | 128 , 1024 | 65536 | 1556.89 | 386.89 | 4.02 | 8.28 | 10.95 | | 16 | 128 | 64 , 32 | 64 , 128 | 65536 | 3081.69 | 761.92 | 4.04 | 16.39 | 21.57 | | 1 | 512 | 8 , 8 | 4096 , 4096 | 32768 | 35.32 | 11.67 | 3.03 | 1.00 | 1.00 | | 4 | 512 | 8 , 8 | 1024 , 1024 | 32768 | 128.98 | 42.84 | 3.01 | 3.65 | 3.67 | | 8 | 512 | 8 , 8 | 512 , 512 | 32768 | 274.04 | 86.78 | 3.16 | 7.76 | 7.44 | | 16 | 512 | 8 , 8 | 256 , 256 | 32768 | 513.43 | 173.26 | 2.96 | 14.54 | 14.85 | Note: The respective values for FP32 runs that use a batch size of 64 in sequence lengths 128 are not available due to out of memory errors that arise. ###### Pre-training training performance: multi-node on DGX-2H 32GB Our results were obtained by running the `run.sub` training script in the TensorFlow 19.08-py3 NGC container using multiple NVIDIA DGX-2 with 16x V100 32GB GPUs. Performance (in sentences per second) is the steady state throughput. | **Num Nodes** | **Sequence Length** | **Batch size / GPU: mixed precision, FP32** | **Gradient Accumulation: mixed precision, FP32** | **Global Batch Size** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - FP32** | |:-------------:|:-------------------:|:-------------------------------------------:|--------------------------------------------------|:---------------------:|:--------------------------------:|:---------------------:|-------------------------------------------------|------------------------------------|-------------------------| | 1 | 128 | 64 , 32 | 64 , 128 | 65536 | 3081.69 | 761.92 | 4.04 | 1.00 | 1.00 | | 4 | 128 | 64 , 32 | 16 , 32 | 65536 | 13192.00 | 3389.83 | 3.89 | 4.28 | 4.45 | | 16 | 128 | 64 , 32 | 4 , 8 | 65536 | 48223.00 | 13217.78 | 3.65 | 15.65 | 17.35 | | 32 | 128 | 64 , 32 | 2 , 4 | 65536 | 86673.64 | 25142.26 | 3.45 | 28.13 | 33.00 | | 1 | 512 | 8 , 8 | 256 , 256 | 32768 | 577.79 | 173.26 | 3.33 | 1.00 | 1.00 | | 4 | 512 | 8 , 8 | 64 , 64 | 32768 | 2284.23 | 765.04 | 2.99 | 3.95 | 4.42 | | 16 | 512 | 8 , 8 | 16 , 16 | 32768 | 8853.00 | 3001.43 | 2.95 | 15.32 | 17.32 | | 32 | 512 | 8 , 8 | 8 , 8 | 32768 | 17059.00 | 5893.14 | 2.89 | 29.52 | 34.01 | Note: The respective values for FP32 runs that use a batch size of 64 in sequence lengths 128 are not available due to out of memory errors that arise. ###### Fine-tuning training performance for SQuAD v1.1 on DGX-2 32GB Our results were obtained by running the `scripts/run_squad.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX-2 with 16x V100 32GB GPUs. Performance (in sentences per second) is the steady state throughput. | **GPUs** | **Batch size / GPU: mixed precision, FP32** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 to mixed precision)** | **Weak scaling - FP32** | **Weak scaling - mixed precision** | |----------|---------------------------------------------|----------------------------------|-----------------------|--------------------------------------------------|-------------------------|------------------------------------| | 1 | 24, 10 | 55.28 | 11.15 | 4.96 | 1.00 | 1.00 | | 4 | 24, 10 | 199.53 | 42.91 | 4.65 | 3.61 | 3.85 | | 8 | 24, 10 | 341.55 | 85.08 | 4.01 | 6.18 | 7.63 | | 16 | 24, 10 | 683.37 | 156.29 | 4.37 | 12.36 | 14.02 | Note: The respective values for FP32 runs that use a batch size of 24 are not available due to out of memory errors that arise. To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above. ##### Training performance: NVIDIA DGX A100 (8x A100 40GB) ###### Pre-training training performance: single-node on DGX A100 40GB Our results were obtained by running the `scripts/run_pretraining_lamb.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX A100 with 8x A100 40GB GPUs. Performance (in sentences per second) is the steady state throughput. | **GPUs** | **Sequence Length** | **Batch size / GPU: mixed precision, TF32** | **Gradient Accumulation: mixed precision, TF32** | **Global Batch Size** | **Throughput - mixed precision** | **Throughput - TF32** | **Throughput speedup (TF32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling -TF32** | |:--------:|:-------------------:|:-------------------------------------------:|--------------------------------------------------|:---------------------:|:--------------------------------:|:---------------------:|-------------------------------------------------|------------------------------------|------------------------| | 1 | 128 | 64 , 64 | 1024 , 1024 | 65536 | 356.845 | 238.10 | 1.50 | 1.00 | 1.00 | | 4 | 128 | 64 , 64 | 256 , 256 | 65536 | 1422.25 | 952.39 | 1.49 | 3.99 | 4.00 | | 8 | 128 | 64 , 64 | 128 , 128 | 65536 | 2871.89 | 1889.71 | 1.52 | 8.05 | 7.94 | | 1 | 512 | 16 , 8 | 2048 , 4096 | 32768 | 70.856 | 39.96 | 1.77 | 1.00 | 1.00 | | 4 | 512 | 16 , 8 | 512 , 1024 | 32768 | 284.912 | 160.16 | 1.78 | 4.02 | 4.01 | | 8 | 512 | 16 , 8 | 256 , 512 | 32768 | 572.112 | 316.51 | 1.81 | 8.07 | 7.92 | Note: The respective values for TF32 runs that use a batch size of 16 for sequence length 512 are not available due to out of memory errors that arise. ###### Pre-training training performance: multi-node on DGX A100 40GB Our results were obtained by running the `scripts/run_pretraining_lamb.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX A100 with 8x A100 40GB GPUs. Performance (in sentences per second) is the steady state throughput. | **Num Nodes** | **Sequence Length** | **Batch size / GPU: mixed precision, TF32** | **Gradient Accumulation: mixed precision, TF32** | **Global Batch Size** | **Throughput - mixed precision** | **Throughput - TF32** | **Throughput speedup (TF32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling -TF32** | |:-------------:|:-------------------:|:-------------------------------------------:|--------------------------------------------------|:---------------------:|:--------------------------------:|:---------------------:|-------------------------------------------------|------------------------------------|------------------------| | 1 | 128 | 64 , 64 | 128 , 128 | 65536 | 2871.89 | 1889.71 | 1.52 | 1.00 | 1.00 | | 4 | 128 | 64 , 64 | 32 , 32 | 65536 | 11159 | 7532.00 | 1.48 | 3.89 | 3.99 | | 16 | 128 | 64 , 64 | 8 , 8 | 65536 | 41144 | 28605.62 | 1.44 | 14.33 | 15.14 | | 32 | 128 | 64 , 64 | 4 , 4 | 65536 | 77479.87 | 53585.82 | 1.45 | 26.98 | 28.36 | | 1 | 512 | 16 , 8 | 256 , 512 | 32768 | 572.112 | 316.51 | 1.81 | 1.00 | 1.00 | | 4 | 512 | 16 , 8 | 128 , 128 | 65536 | 2197.44 | 1268.43 | 1.73 | 3.84 | 4.01 | | 16 | 512 | 16 , 8 | 32 , 32 | 65536 | 8723.1 | 4903.39 | 1.78 | 15.25 | 15.49 | | 32 | 512 | 16 , 8 | 16 , 16 | 65536 | 16705 | 9463.80 | 1.77 | 29.20 | 29.90 | Note: The respective values for TF32 runs that use a batch size of 16 for sequence length 512 are not available due to out of memory errors that arise. ###### Fine-tuning training performance for SQuAD v1.1 on DGX A100 40GB Our results were obtained by running the `scripts/run_squad.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX A100 with 8x A100 40GB GPUs. Performance (in sentences per second) is the steady state throughput. | **GPUs** | **Batch size / GPU: mixed precision, TF32** | **Throughput - mixed precision** | **Throughput - TF32** | **Throughput speedup (TF32 to mixed precision)** | **Weak scaling - TF32** | **Weak scaling - mixed precision** | |----------|---------------------------------------------|----------------------------------|-----------------------|--------------------------------------------------|-------------------------|------------------------------------| | 1 | 32, 16 | 102.26 | 61.364 | 1.67 | 1.00 | 1.00 | | 4 | 32, 16 | 366.353 | 223.187 | 1.64 | 3.64 | 3.58 | | 8 | 32, 16 | 767.071 | 440.47 | 1.74 | 7.18 | 7.50 | Note: The respective values for TF32 runs that use a batch size of 32 are not available due to out of memory errors that arise. To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above. #### Inference performance results ##### Inference performance: NVIDIA DGX-1 (1x V100 16GB) ###### Fine-tuning inference performance for SQuAD v1.1 on 16GB Our results were obtained by running the `scripts/finetune_inference_benchmark.sh` script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX-1 with 1x V100 16GB GPUs. Performance numbers (throughput in sentences per second and latency in milliseconds) were averaged from 1024 iterations. Latency is computed as the time taken for a batch to process as they are fed in one after another in the model ie no pipelining. | Model | Sequence Length | Batch Size | Precision | Throughput-Average(sent/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) | |-------|-----------------|------------|-----------|------------------------------|---------------------|-----------------|-----------------|-----------------| | base | 128 | 1 | fp16 | 206.82 | 7.96 | 4.98 | 5.04 | 5.23 | | base | 128 | 2 | fp16 | 376.75 | 8.68 | 5.42 | 5.49 | 5.64 | | base | 128 | 4 | fp16 | 635 | 12.31 | 6.46 | 6.55 | 6.83 | | base | 128 | 8 | fp16 | 962.83 | 13.64 | 8.47 | 8.56 | 8.75 | | base | 384 | 1 | fp16 | 167.01 | 12.77 | 6.12 | 6.23 | 6.52 | | base | 384 | 2 | fp16 | 252.12 | 21.05 | 8.03 | 8.09 | 8.61 | | base | 384 | 4 | fp16 | 341.95 | 25.09 | 11.88 | 11.96 | 12.52 | | base | 384 | 8 | fp16 | 421.26 | 33.16 | 19.2 | 19.37 | 19.91 | | | | | | | | | | | | base | 128 | 1 | fp32 | 174.48 | 8.17 | 5.89 | 5.95 | 6.12 | | base | 128 | 2 | fp32 | 263.67 | 10.33 | 7.66 | 7.69 | 7.92 | | base | 128 | 4 | fp32 | 349.34 | 16.31 | 11.57 | 11.62 | 11.87 | | base | 128 | 8 | fp32 | 422.88 | 23.27 | 19.23 | 19.38 | 20.38 | | base | 384 | 1 | fp32 | 99.52 | 14.99 | 10.19 | 10.23 | 10.78 | | base | 384 | 2 | fp32 | 118.01 | 25.98 | 17.12 | 17.18 | 17.78 | | base | 384 | 4 | fp32 | 128.1 | 41 | 31.56 | 31.7 | 32.39 | | base | 384 | 8 | fp32 | 136.1 | 69.77 | 59.44 | 59.66 | 60.51 | | | | | | | | | | | | large | 128 | 1 | fp16 | 98.63 | 15.86 | 10.27 | 10.31 | 10.46 | | large | 128 | 2 | fp16 | 172.59 | 17.78 | 11.81 | 11.86 | 12.13 | | large | 128 | 4 | fp16 | 272.86 | 25.66 | 14.86 | 14.94 | 15.18 | | large | 128 | 8 | fp16 | 385.64 | 30.74 | 20.98 | 21.1 | 21.68 | | large | 384 | 1 | fp16 | 70.74 | 26.85 | 14.38 | 14.47 | 14.7 | | large | 384 | 2 | fp16 | 99.9 | 45.29 | 20.26 | 20.43 | 21.11 | | large | 384 | 4 | fp16 | 128.42 | 56.94 | 31.44 | 31.71 | 32.45 | | large | 384 | 8 | fp16 | 148.57 | 81.69 | 54.23 | 54.54 | 55.53 | | | | | | | | | | | | large | 128 | 1 | fp32 | 76.75 | 17.06 | 13.21 | 13.27 | 13.4 | | large | 128 | 2 | fp32 | 100.82 | 24.34 | 20.05 | 20.13 | 21.13 | | large | 128 | 4 | fp32 | 117.59 | 41.76 | 34.42 | 34.55 | 35.29 | | large | 128 | 8 | fp32 | 130.42 | 68.59 | 62 | 62.23 | 62.98 | | large | 384 | 1 | fp32 | 33.95 | 37.89 | 29.82 | 29.98 | 30.56 | | large | 384 | 2 | fp32 | 38.47 | 68.35 | 52.56 | 52.74 | 53.89 | | large | 384 | 4 | fp32 | 41.11 | 114.27 | 98.19 | 98.54 | 99.54 | | large | 384 | 8 | fp32 | 41.32 | 213.84 | 194.92 | 195.36 | 196.94 | To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above. ##### Inference performance: NVIDIA DGX-1 (1x V100 32GB) ###### Fine-tuning inference performance for SQuAD v1.1 on 32GB Our results were obtained by running the `scripts/finetune_inference_benchmark.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX-1 with 1x V100 32GB GPUs. Performance numbers (throughput in sentences per second and latency in milliseconds) were averaged from 1024 iterations. Latency is computed as the time taken for a batch to process as they are fed in one after another in the model ie no pipelining. | Model | Sequence Length | Batch Size | Precision | Throughput-Average(sent/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) | |-------|-----------------|------------|-----------|------------------------------|---------------------|-----------------|-----------------|-----------------| | base | 128 | 1 | fp16 | 207.87 | 7.63 | 4.94 | 5.03 | 5.32 | | base | 128 | 2 | fp16 | 376.44 | 8.47 | 5.44 | 5.5 | 5.68 | | base | 128 | 4 | fp16 | 642.55 | 11.63 | 6.3 | 6.36 | 6.68 | | base | 128 | 8 | fp16 | 943.85 | 13.24 | 8.56 | 8.68 | 8.92 | | base | 384 | 1 | fp16 | 162.62 | 12.24 | 6.31 | 6.4 | 6.73 | | base | 384 | 2 | fp16 | 244.15 | 20.05 | 8.34 | 8.41 | 8.93 | | base | 384 | 4 | fp16 | 338.68 | 23.53 | 11.88 | 11.92 | 12.63 | | base | 384 | 8 | fp16 | 407.46 | 32.72 | 19.84 | 20.06 | 20.89 | | | | | | | | | | | | base | 128 | 1 | fp32 | 175.16 | 8.31 | 5.85 | 5.89 | 6.04 | | base | 128 | 2 | fp32 | 261.31 | 10.48 | 7.75 | 7.81 | 8.08 | | base | 128 | 4 | fp32 | 339.45 | 16.67 | 11.95 | 12.02 | 12.46 | | base | 128 | 8 | fp32 | 406.67 | 24.12 | 19.86 | 19.97 | 20.41 | | base | 384 | 1 | fp32 | 98.33 | 15.28 | 10.27 | 10.32 | 10.76 | | base | 384 | 2 | fp32 | 114.92 | 26.88 | 17.55 | 17.59 | 18.29 | | base | 384 | 4 | fp32 | 125.76 | 41.74 | 32.06 | 32.23 | 33.72 | | base | 384 | 8 | fp32 | 136.62 | 69.78 | 58.95 | 59.19 | 60 | | | | | | | | | | | | large | 128 | 1 | fp16 | 96.46 | 15.56 | 10.56 | 10.66 | 11.02 | | large | 128 | 2 | fp16 | 168.31 | 17.42 | 12.11 | 12.25 | 12.57 | | large | 128 | 4 | fp16 | 267.76 | 24.76 | 15.17 | 15.36 | 16.68 | | large | 128 | 8 | fp16 | 378.28 | 30.34 | 21.39 | 21.54 | 21.97 | | large | 384 | 1 | fp16 | 68.75 | 26.02 | 14.77 | 14.94 | 15.3 | | large | 384 | 2 | fp16 | 95.41 | 44.01 | 21.24 | 21.47 | 22.01 | | large | 384 | 4 | fp16 | 124.43 | 55.14 | 32.53 | 32.83 | 33.58 | | large | 384 | 8 | fp16 | 143.02 | 81.37 | 56.51 | 56.88 | 58.05 | | | | | | | | | | | | large | 128 | 1 | fp32 | 75.34 | 17.5 | 13.46 | 13.52 | 13.7 | | large | 128 | 2 | fp32 | 99.73 | 24.7 | 20.27 | 20.38 | 21.45 | | large | 128 | 4 | fp32 | 116.92 | 42.1 | 34.49 | 34.59 | 34.98 | | large | 128 | 8 | fp32 | 130.11 | 68.95 | 62.03 | 62.23 | 63.3 | | large | 384 | 1 | fp32 | 33.84 | 38.15 | 29.75 | 29.89 | 31.23 | | large | 384 | 2 | fp32 | 38.02 | 69.31 | 53.1 | 53.36 | 54.42 | | large | 384 | 4 | fp32 | 41.2 | 114.34 | 97.96 | 98.32 | 99.55 | | large | 384 | 8 | fp32 | 42.37 | 209.16 | 190.18 | 190.66 | 192.77 | To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above. ##### Inference performance: NVIDIA DGX-2 (1x V100 32GB) ###### Fine-tuning inference performance for SQuAD v1.1 on DGX-2 32GB Our results were obtained by running the `scripts/finetune_inference_benchmark.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX-2 with 1x V100 32GB GPUs. Performance numbers (throughput in sentences per second and latency in milliseconds) were averaged from 1024 iterations. Latency is computed as the time taken for a batch to process as they are fed in one after another in the model ie no pipelining. | Model | Sequence Length | Batch Size | Precision | Throughput-Average(sent/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) | |-------|-----------------|------------|-----------|------------------------------|---------------------|-----------------|-----------------|-----------------| | base | 128 | 1 | fp16 | 220.35 | 7.82 | 4.7 | 4.83 | 5.15 | | base | 128 | 2 | fp16 | 384.55 | 8.7 | 5.49 | 5.68 | 6.01 | | base | 128 | 4 | fp16 | 650.7 | 36.3 | 6.35 | 6.51 | 6.87 | | base | 128 | 8 | fp16 | 992.41 | 13.59 | 8.22 | 8.37 | 8.96 | | base | 384 | 1 | fp16 | 172.89 | 12.86 | 5.94 | 6.04 | 6.44 | | base | 384 | 2 | fp16 | 258.48 | 20.42 | 7.89 | 8.09 | 9.15 | | base | 384 | 4 | fp16 | 346.34 | 24.93 | 11.97 | 12.12 | 12.76 | | base | 384 | 8 | fp16 | 430.4 | 33.08 | 18.75 | 19.27 | 20.12 | | | | | | | | | | | | base | 128 | 1 | fp32 | 183.69 | 7.52 | 5.86 | 5.97 | 6.27 | | base | 128 | 2 | fp32 | 282.95 | 9.51 | 7.31 | 7.49 | 7.83 | | base | 128 | 4 | fp32 | 363.83 | 15.12 | 11.35 | 11.47 | 11.74 | | base | 128 | 8 | fp32 | 449.12 | 21.65 | 18 | 18.1 | 18.6 | | base | 384 | 1 | fp32 | 104.92 | 13.8 | 9.9 | 9.99 | 10.48 | | base | 384 | 2 | fp32 | 123.55 | 24.21 | 16.29 | 16.4 | 17.61 | | base | 384 | 4 | fp32 | 139.38 | 36.69 | 28.89 | 29.04 | 30.01 | | base | 384 | 8 | fp32 | 146.28 | 64.69 | 55.09 | 55.32 | 56.3 | | | | | | | | | | | | large | 128 | 1 | fp16 | 98.34 | 15.85 | 10.61 | 10.78 | 11.5 | | large | 128 | 2 | fp16 | 172.95 | 17.8 | 11.91 | 12.08 | 12.42 | | large | 128 | 4 | fp16 | 278.82 | 25.18 | 14.7 | 14.87 | 15.65 | | large | 128 | 8 | fp16 | 402.28 | 30.45 | 20.21 | 20.43 | 21.24 | | large | 384 | 1 | fp16 | 71.1 | 26.55 | 14.44 | 14.61 | 15.32 | | large | 384 | 2 | fp16 | 100.48 | 44.04 | 20.31 | 20.48 | 21.6 | | large | 384 | 4 | fp16 | 131.68 | 56.19 | 30.8 | 31.03 | 32.3 | | large | 384 | 8 | fp16 | 151.81 | 81.53 | 53.22 | 53.87 | 55.34 | | | | | | | | | | | | large | 128 | 1 | fp32 | 77.87 | 16.33 | 13.33 | 13.45 | 13.77 | | large | 128 | 2 | fp32 | 105.41 | 22.77 | 19.39 | 19.52 | 19.86 | | large | 128 | 4 | fp32 | 124.16 | 38.61 | 32.69 | 32.88 | 33.9 | | large | 128 | 8 | fp32 | 137.69 | 64.61 | 58.62 | 58.89 | 59.94 | | large | 384 | 1 | fp32 | 36.34 | 34.94 | 27.72 | 27.81 | 28.21 | | large | 384 | 2 | fp32 | 41.11 | 62.54 | 49.14 | 49.32 | 50.25 | | large | 384 | 4 | fp32 | 43.32 | 107.53 | 93.07 | 93.47 | 94.27 | | large | 384 | 8 | fp32 | 44.64 | 196.28 | 180.21 | 180.75 | 182.41 | ##### Inference performance: NVIDIA DGX A100 (1x A100 40GB) ###### Fine-tuning inference performance for SQuAD v1.1 on DGX A100 40GB Our results were obtained by running the `scripts/finetune_inference_benchmark.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA DGX A100 with 1x A100 40GB GPUs. Performance numbers (throughput in sentences per second and latency in milliseconds) were averaged from 1024 iterations. Latency is computed as the time taken for a batch to process as they are fed in one after another in the model ie no pipelining. | Model | Sequence Length | Batch Size | Precision | Throughput-Average(sent/sec) | Latency-Average(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) | |-------|-----------------|------------|-----------|------------------------------|---------------------|-----------------|-----------------|-----------------| | base | 128 | 1 | fp16 | 231.37 | 6.43 | 4.57 | 4.68 | 4.93 | | base | 128 | 2 | fp16 | 454.54 | 6.77 | 4.66 | 4.77 | 4.96 | | base | 128 | 4 | fp16 | 842.34 | 8.8 | 4.91 | 4.98 | 5.39 | | base | 128 | 8 | fp16 | 1216.43 | 10.39 | 6.77 | 6.86 | 7.28 | | base | 384 | 1 | fp16 | 210.59 | 9.03 | 4.83 | 4.86 | 5.06 | | base | 384 | 2 | fp16 | 290.91 | 14.88 | 7.09 | 7.19 | 7.72 | | base | 384 | 4 | fp16 | 407.13 | 18.04 | 9.93 | 10.05 | 10.74 | | base | 384 | 8 | fp16 | 478.67 | 26.06 | 16.92 | 17.19 | 17.76 | | | | | | | | | | | | base | 128 | 1 | tf32 | 223.38 | 6.94 | 4.73 | 4.86 | 5.04 | | base | 128 | 2 | tf32 | 447.57 | 7.2 | 4.68 | 4.82 | 5.07 | | base | 128 | 4 | tf32 | 838.89 | 9.16 | 4.88 | 4.93 | 5.38 | | base | 128 | 8 | tf32 | 1201.05 | 10.81 | 6.88 | 6.99 | 7.21 | | base | 384 | 1 | tf32 | 206.46 | 9.74 | 4.93 | 4.98 | 5.25 | | base | 384 | 2 | tf32 | 287 | 15.57 | 7.18 | 7.27 | 7.87 | | base | 384 | 4 | tf32 | 396.59 | 18.94 | 10.3 | 10.41 | 11.04 | | base | 384 | 8 | tf32 | 479.04 | 26.81 | 16.88 | 17.25 | 17.74 | | | | | | | | | | | | base | 128 | 1 | fp32 | 152.92 | 9.13 | 6.76 | 6.91 | 7.06 | | base | 128 | 2 | fp32 | 297.42 | 9.51 | 6.93 | 7.07 | 7.21 | | base | 128 | 4 | fp32 | 448.57 | 11.81 | 9.12 | 9.25 | 9.68 | | base | 128 | 8 | fp32 | 539.94 | 17.49 | 15 | 15.1 | 15.79 | | base | 384 | 1 | fp32 | 115.19 | 13.69 | 8.89 | 8.98 | 9.27 | | base | 384 | 2 | fp32 | 154.66 | 18.49 | 13.06 | 13.14 | 13.89 | | base | 384 | 4 | fp32 | 174.28 | 28.75 | 23.11 | 23.24 | 24 | | base | 384 | 8 | fp32 | 191.97 | 48.05 | 41.85 | 42.25 | 42.8 | | | | | | | | | | | | large | 128 | 1 | fp16 | 127.75 | 11.18 | 8.14 | 8.25 | 8.53 | | large | 128 | 2 | fp16 | 219.49 | 12.76 | 9.4 | 9.54 | 9.89 | | large | 128 | 4 | fp16 | 315.83 | 19.01 | 12.87 | 12.98 | 13.37 | | large | 128 | 8 | fp16 | 495.75 | 22.21 | 16.33 | 16.45 | 16.79 | | large | 384 | 1 | fp16 | 96.65 | 17.46 | 10.52 | 10.6 | 11 | | large | 384 | 2 | fp16 | 126.07 | 29.43 | 16.09 | 16.22 | 16.78 | | large | 384 | 4 | fp16 | 165.21 | 38.39 | 24.41 | 24.61 | 25.38 | | large | 384 | 8 | fp16 | 182.13 | 61.04 | 44.32 | 44.61 | 45.23 | | | | | | | | | | | | large | 128 | 1 | tf32 | 133.24 | 10.86 | 7.77 | 7.87 | 8.23 | | large | 128 | 2 | tf32 | 218.13 | 12.86 | 9.44 | 9.56 | 9.85 | | large | 128 | 4 | tf32 | 316.25 | 18.98 | 12.91 | 13.01 | 13.57 | | large | 128 | 8 | tf32 | 495.21 | 22.25 | 16.4 | 16.51 | 17.23 | | large | 384 | 1 | tf32 | 95.43 | 17.5 | 10.72 | 10.83 | 11.49 | | large | 384 | 2 | tf32 | 125.99 | 29.47 | 16.06 | 16.15 | 16.67 | | large | 384 | 4 | tf32 | 164.28 | 38.77 | 24.6 | 24.83 | 25.59 | | large | 384 | 8 | tf32 | 182.46 | 61 | 44.2 | 44.46 | 45.15 | | | | | | | | | | | | large | 128 | 1 | fp32 | 50.43 | 23.83 | 20.11 | 20.2 | 20.56 | | large | 128 | 2 | fp32 | 94.47 | 25.53 | 21.36 | 21.49 | 21.78 | | large | 128 | 4 | fp32 | 141.52 | 32.51 | 28.44 | 28.57 | 28.99 | | large | 128 | 8 | fp32 | 166.37 | 52.07 | 48.3 | 48.43 | 49.46 | | large | 384 | 1 | fp32 | 44.42 | 30.54 | 22.67 | 22.74 | 23.46 | | large | 384 | 2 | fp32 | 50.29 | 48.74 | 39.95 | 40.06 | 40.59 | | large | 384 | 4 | fp32 | 55.58 | 81.55 | 72.31 | 72.6 | 73.7 | | large | 384 | 8 | fp32 | 58.38 | 147.63 | 137.43 | 137.82 | 138.3 | To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above. ##### Inference performance: NVIDIA Tesla T4 (1x T4 16GB) ###### Fine-tuning inference performance for SQuAD v1.1 on Tesla T4 16GB Our results were obtained by running the `scripts/finetune_inference_benchmark.sh` training script in the TensorFlow 20.06-py3 NGC container on NVIDIA Tesla T4 with 1x T4 16GB GPUs. Performance numbers (throughput in sentences per second and latency in milliseconds) were averaged from 1024 iterations. Latency is computed as the time taken for a batch to process as they are fed in one after another in the model ie no pipelining. | Model | Sequence Length | Batch Size | Precision | Throughput-Average(sent/sec) | Latency-Average(ms) | Latency-50%(ms) | Latency-90%(ms) | Latency-95%(ms) | Latency-99%(ms) | Latency-100%(ms) | |-------|-----------------|------------|-----------|------------------------------|---------------------|-----------------|-----------------|-----------------|-----------------|------------------| | base | 128 | 1 | fp16 | 91.93 | 13.94 | 10.93 | 11.41 | 11.52 | 11.94 | 5491.47 | | base | 128 | 2 | fp16 | 148.08 | 16.91 | 13.65 | 13.95 | 14.06 | 14.74 | 5757.12 | | base | 128 | 4 | fp16 | 215.45 | 24.56 | 18.68 | 18.92 | 19.08 | 19.84 | 5894.82 | | base | 128 | 8 | fp16 | 289.52 | 33.07 | 27.77 | 28.22 | 28.38 | 29.16 | 6074.47 | | base | 384 | 1 | fp16 | 60.75 | 23.18 | 16.6 | 16.93 | 17.03 | 17.45 | 7006.41 | | base | 384 | 2 | fp16 | 82.85 | 37.05 | 24.26 | 24.54 | 24.63 | 25.67 | 7529.94 | | base | 384 | 4 | fp16 | 97.78 | 54.4 | 41.02 | 41.53 | 41.94 | 43.91 | 7995.39 | | base | 384 | 8 | fp16 | 106.78 | 89.6 | 74.98 | 75.5 | 76.13 | 78.02 | 8461.93 | | | | | | | | | | | | | | base | 128 | 1 | fp32 | 54.28 | 20.88 | 18.52 | 18.8 | 18.92 | 19.29 | 4401.4 | | base | 128 | 2 | fp32 | 71.75 | 30.57 | 28.08 | 28.51 | 28.62 | 29.12 | 4573.47 | | base | 128 | 4 | fp32 | 88.01 | 50.37 | 45.61 | 45.94 | 46.14 | 47.04 | 4992.7 | | base | 128 | 8 | fp32 | 98.92 | 85.57 | 80.98 | 81.44 | 81.74 | 82.75 | 5408.97 | | base | 384 | 1 | fp32 | 25.83 | 43.63 | 38.75 | 39.33 | 39.43 | 40.02 | 5148.45 | | base | 384 | 2 | fp32 | 29.08 | 77.68 | 68.89 | 69.26 | 69.55 | 72.08 | 5462.5 | | base | 384 | 4 | fp32 | 30.33 | 141.45 | 131.86 | 132.53 | 133.14 | 136.7 | 5975.63 | | base | 384 | 8 | fp32 | 31.8 | 262.88 | 251.62 | 252.23 | 253.08 | 255.56 | 7124 | | | | | | | | | | | | | | large | 128 | 1 | fp16 | 40.31 | 30.61 | 25.14 | 25.62 | 25.87 | 27.61 | 10395.87 | | large | 128 | 2 | fp16 | 63.79 | 37.43 | 31.66 | 32.31 | 32.66 | 34.36 | 10302.2 | | large | 128 | 4 | fp16 | 87.4 | 56.5 | 45.97 | 46.6 | 47.01 | 48.71 | 10391.17 | | large | 128 | 8 | fp16 | 107.5 | 84.29 | 74.59 | 75.25 | 75.64 | 77.73 | 10945.1 | | large | 384 | 1 | fp16 | 23.05 | 55.73 | 43.72 | 44.28 | 44.74 | 46.8 | 12889.23 | | large | 384 | 2 | fp16 | 29.59 | 91.61 | 67.94 | 68.8 | 69.45 | 71.64 | 13876.35 | | large | 384 | 4 | fp16 | 34.27 | 141.56 | 116.67 | 118.02 | 119.1 | 122.1 | 14570.73 | | large | 384 | 8 | fp16 | 38.29 | 237.85 | 208.95 | 210.08 | 211.33 | 214.61 | 16626.02 | | | | | | | | | | | | | | large | 128 | 1 | fp32 | 21.52 | 50.46 | 46.48 | 47.63 | 47.94 | 49.63 | 7150.38 | | large | 128 | 2 | fp32 | 25.4 | 83.3 | 79.06 | 79.61 | 80.06 | 81.77 | 7763.11 | | large | 128 | 4 | fp32 | 28.19 | 149.49 | 142.15 | 143.1 | 143.65 | 145.43 | 7701.38 | | large | 128 | 8 | fp32 | 30.14 | 272.84 | 265.6 | 266.57 | 267.21 | 269.37 | 8246.3 | | large | 384 | 1 | fp32 | 8.46 | 126.81 | 118.44 | 119.42 | 120.31 | 122.74 | 9007.96 | | large | 384 | 2 | fp32 | 9.29 | 231 | 215.54 | 216.64 | 217.71 | 220.35 | 9755.69 | | large | 384 | 4 | fp32 | 9.55 | 436.5 | 418.71 | 420.05 | 421.27 | 424.3 | 11766.45 | | large | 384 | 8 | fp32 | 9.75 | 840.9 | 820.39 | 822.19 | 823.69 | 827.99 | 12856.99 | To achieve these same results, follow the [Quick Start Guide](#quick-start-guide) outlined above. ## Release notes ### Changelog April 2023 - Ceased maintenance of this model in TensorFlow1 June 2020 - Results obtained using 20.06 and on DGX A100 40GB Janurary 2020 - Added inference with TensorRT November 2019 - Pre-training and Finetuning on BioMedical tasks and corpus October 2019 - Disabling Grappler Optimizations for improved performance September 2019 - Pre-training using LAMB - Multi Node support - Fine Tuning support for GLUE (CoLA, MNLI, MRPC) July 2019 - Results obtained using 19.06 - Inference Studies using Triton Inference Server March 2019 - Initial release ### Known issues There are no known issues with this model.
Kaldi/SpeechRecognition/scripts
scripts
run_inference_all_t4
#!/bin/bash # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e if [[ "$(docker ps | grep triton_kaldi_server | wc -l)" == "0" ]]; then printf "\nThe Triton server is currently not running. Please run scripts/docker/launch_server.sh\n\n" exit 1 fi printf "\nOffline benchmarks:\n" scripts/docker/launch_client.sh -i 5 -c 1000 printf "\nOnline benchmarks:\n" scripts/docker/launch_client.sh -i 10 -c 600 -o scripts/docker/launch_client.sh -i 10 -c 400 -o
TensorFlow/Recommendation/NCF
NCF
input_pipeline
# ----------------------------------------------------------------------- # # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import cupy as cp def generate_negatives(neg_users, true_mat, item_range, sort=False, use_trick=False): """ Generate negative samples for data augmentation """ neg_u = [] neg_i = [] # If using the shortcut, generate negative items without checking if the associated # user has interacted with it. Speeds up training significantly with very low impact # on accuracy. if use_trick: neg_items = cp.random.randint(0, high=item_range, size=neg_users.shape[0]) return neg_users, neg_items # Otherwise, generate negative items, check if associated user has interacted with it, # then generate a new one if true while len(neg_users) > 0: neg_items = cp.random.randint(0, high=item_range, size=neg_users.shape[0]) neg_mask = true_mat[neg_users, neg_items] neg_u.append(neg_users[neg_mask]) neg_i.append(neg_items[neg_mask]) neg_users = neg_users[cp.logical_not(neg_mask)] neg_users = cp.concatenate(neg_u) neg_items = cp.concatenate(neg_i) if not sort: return neg_users, neg_items sorted_users = cp.sort(neg_users) sort_indices = cp.argsort(neg_users) return sorted_users, neg_items[sort_indices] class DataGenerator: """ Class to handle data augmentation """ def __init__(self, seed, hvd_rank, num_users, # type: int num_items, # type: int neg_mat, # type: np.ndarray train_users, # type: np.ndarray train_items, # type: np.ndarray train_labels, # type: np.ndarray train_batch_size, # type: int train_negative_samples, # type: int pos_eval_users, # type: np.ndarray pos_eval_items, # type: np.ndarray eval_users_per_batch, # type: int eval_negative_samples, # type: int ): # Check input data if train_users.shape != train_items.shape: raise ValueError( "Train shapes mismatch! {} Users vs {} Items!".format( train_users.shape, train_items.shape)) if pos_eval_users.shape != pos_eval_items.shape: raise ValueError( "Eval shapes mismatch! {} Users vs {} Items!".format( pos_eval_users.shape, pos_eval_items.shape)) np.random.seed(seed) cp.random.seed(seed) # Use GPU assigned to the horovod rank self.hvd_rank = hvd_rank cp.cuda.Device(self.hvd_rank).use() self.num_users = num_users self.num_items = num_items self._neg_mat = neg_mat self._train_users = cp.array(train_users) self._train_items = cp.array(train_items) self._train_labels = cp.array(train_labels) self.train_batch_size = train_batch_size self._train_negative_samples = train_negative_samples self._pos_eval_users = pos_eval_users self._pos_eval_items = pos_eval_items self.eval_users_per_batch = eval_users_per_batch self._eval_negative_samples = eval_negative_samples # Eval data self.eval_users = None self.eval_items = None self.dup_mask = None # Training data self.train_users_batches = None self.train_items_batches = None self.train_labels_batches = None # Augment test data with negative samples def prepare_eval_data(self): pos_eval_users = cp.array(self._pos_eval_users) pos_eval_items = cp.array(self._pos_eval_items) neg_mat = cp.array(self._neg_mat) neg_eval_users_base = cp.repeat(pos_eval_users, self._eval_negative_samples) # Generate negative samples test_u_neg, test_i_neg = generate_negatives(neg_users=neg_eval_users_base, true_mat=neg_mat, item_range=self.num_items, sort=True, use_trick=False) test_u_neg = test_u_neg.reshape((-1, self._eval_negative_samples)).get() test_i_neg = test_i_neg.reshape((-1, self._eval_negative_samples)).get() test_users = self._pos_eval_users.reshape((-1, 1)) test_items = self._pos_eval_items.reshape((-1, 1)) # Combine positive and negative samples test_users = np.concatenate((test_u_neg, test_users), axis=1) test_items = np.concatenate((test_i_neg, test_items), axis=1) # Generate duplicate mask ## Stable sort indices by incrementing all values with fractional position indices = np.arange(test_users.shape[1]).reshape((1, -1)).repeat(test_users.shape[0], axis=0) summed_items = np.add(test_items, indices/test_users.shape[1]) sorted_indices = np.argsort(summed_items, axis=1) sorted_order = np.argsort(sorted_indices, axis=1) sorted_items = np.sort(test_items, axis=1) ## Generate duplicate mask dup_mask = np.equal(sorted_items[:,0:-1], sorted_items[:,1:]) dup_mask = np.concatenate((dup_mask, np.zeros((test_users.shape[0], 1))), axis=1) r_indices = np.arange(test_users.shape[0]).reshape((-1, 1)).repeat(test_users.shape[1], axis=1) dup_mask = dup_mask[r_indices, sorted_order].astype(np.float32) # Reshape all to (-1) and split into chunks batch_size = self.eval_users_per_batch * test_users.shape[1] split_indices = np.arange(batch_size, test_users.shape[0]*test_users.shape[1], batch_size) self.eval_users = np.split(test_users.reshape(-1), split_indices) self.eval_items = np.split(test_items.reshape(-1), split_indices) self.dup_mask = np.split(dup_mask.reshape(-1), split_indices) # Free GPU memory to make space for Tensorflow cp.get_default_memory_pool().free_all_blocks() # Augment training data with negative samples def prepare_train_data(self): batch_size = self.train_batch_size is_neg = cp.logical_not(self._train_labels) # Do not store verification matrix if using the negatives generation shortcut neg_mat = None # If there are no negative samples in the local portion of the training data, do nothing any_neg = cp.any(is_neg) if any_neg: self._train_users[is_neg], self._train_items[is_neg] = generate_negatives( self._train_users[is_neg], neg_mat, self.num_items, use_trick=True ) shuffled_order = cp.random.permutation(self._train_users.shape[0]) self._train_users = self._train_users[shuffled_order] self._train_items = self._train_items[shuffled_order] self._train_labels = self._train_labels[shuffled_order] # Manually create batches split_indices = np.arange(batch_size, self._train_users.shape[0], batch_size) self.train_users_batches = np.split(self._train_users, split_indices) self.train_items_batches = np.split(self._train_items, split_indices) self.train_labels_batches = np.split(self._train_labels, split_indices)
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/scripts
scripts
run_volatility_DGX1-16G
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. : ${SEED:=1} : ${LR:=1e-3} : ${BATCH_SIZE:=768} : ${NGPU:=8} : ${EPOCHS:=20} python -m torch.distributed.run --nproc_per_node=${NGPU} train.py \ --dataset volatility \ --data_path /data/processed/volatility_bin \ --batch_size=${BATCH_SIZE} \ --lr ${LR} \ --epochs ${EPOCHS} \ --seed ${SEED} \ --use_amp \ --results /results/TFT_volatility_bs${NGPU}x${BATCH_SIZE}_lr${LR}/seed_${SEED}
PyTorch/Classification/GPUNet/configs
configs
gpunet_torchhub
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch def nvidia_gpunet(pretrained=True, **kwargs): """Constructs a gpunet model (nn.module with additional infer(input) method). For detailed information on model input and output, training recipies, inference and performance visit: github.com/NVIDIA/DeepLearningExamples and/or ngc.nvidia.com Args (type[, default value]): pretrained (bool, True): If True, returns a pretrained model. Pretrained only gpunets. model_math (str, 'fp32'): returns a model in given precision ('fp32' or 'fp16'). Precision fp32 only gpunets model_type (str, 'GPUNet-0'): loads selected model type GPUNet-1.... or GPUNet-P0/P1 or GPUNet-D1/D2. Defaults to GPUNet-0 """ from ..models.gpunet_builder import GPUNet_Builder from .model_hub import get_configs, MODEL_ZOO_NAME2TYPE_B1 from timm.models.helpers import load_checkpoint modelType = kwargs.get('model_type', 'GPUNet-0') print("model_type=", modelType) errMsg = "model_type {} not found, available models are {}".format( modelType, list(MODEL_ZOO_NAME2TYPE_B1.keys()) ) assert modelType in MODEL_ZOO_NAME2TYPE_B1.keys(), errMsg is_prunet = False if "GPUNet-P0" in modelType or "GPUNet-P1" in modelType: is_prunet = True modelLatency = MODEL_ZOO_NAME2TYPE_B1[modelType] print("mapped model latency=", modelLatency) modelJSON, cpkPath = get_configs(batch=1, latency=modelLatency, gpuType="GV100", download=pretrained, config_root_dir=os.path.dirname(__file__)) builder = GPUNet_Builder() model = builder.get_model(modelJSON) if pretrained: errMsg = "checkpoint not found at {}, ".format(cpkPath) errMsg += "retrieve with get_config_and_checkpoint_files " assert os.path.isfile(cpkPath) is True, errMsg if is_prunet: model.load_state_dict(torch.load(cpkPath)) else: load_checkpoint(model, cpkPath, use_ema=True) modelMath = kwargs.get('model_math', 'fp32') if modelMath == "fp16": model.half() return model
PyTorch/Segmentation/MaskRCNN/pytorch/scripts
scripts
inference_benchmark
#!/bin/bash # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. #Predictions will be stored in `FOLDER`/inference` #1x8x4 DGX1V GPU=1 CONFIG='configs/e2e_mask_rcnn_R_50_FPN_1x.yaml' DTYPE=$1 #This folder should a file called 'last_checkpoint' which contains the path to the actual checkpoint FOLDER='/results' #Example # /results # ------last_checkpoint # ------model.pth # # last_checkpoint #----------------------------- #|/results/model.pth | #| | #| | #| | #| | #| | #----------------------------- LOGFILE="$FOLDER/joblog.log" if ! [ -d "$FOLDER" ]; then mkdir $FOLDER; fi python3 -m torch.distributed.launch --nproc_per_node=$GPU tools/test_net.py \ --config-file $CONFIG \ --skip-eval \ DATASETS.TEST "(\"coco_2017_val\",)" \ DTYPE "$DTYPE" \ NHWC "${NHWC:-True}" \ DATALOADER.HYBRID "${HYBRID:-True}" \ OUTPUT_DIR $FOLDER \ TEST.IMS_PER_BATCH $2 \ | tee $LOGFILE #2019-02-22 00:05:39,954 maskrcnn_benchmark.inference INFO: Total inference time: 0:04:55.840343 (0.05916806864738464 s / img per device, on 1 devices) time=`cat $LOGFILE | grep -F 'maskrcnn_benchmark.inference INFO: Total inference time' | tail -n 1 | awk -F'(' '{print $2}' | awk -F' s ' '{print $1}' | egrep -o [0-9.]+` calc=$(echo $time 1.0 | awk '{ printf "%f", $2 / $1 }') echo "Inference perf is: "$calc" FPS"
TensorFlow/Segmentation/UNet_Medical/examples
examples
unet_TRAIN_BENCHMARK_1GPU
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script launches U-Net run in FP32 on 1 GPU for training benchmarking. Usage: # bash unet_TRAIN_BENCHMARK_FP32_1GPU.sh <path to dataset> <path to results directory> <batch size> horovodrun -np 1 python main.py --data_dir $1 --model_dir $2 --batch_size $3 --exec_mode train --augment --benchmark --warmup_steps 200 --max_steps 1000 --xla
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt
tft_pyt
requirements
tensorboard pandas==1.1.4
PyTorch/SpeechRecognition/Jasper/platform
platform
DGX2_Jasper_AMP_8GPU
#!/bin/bash NUM_GPUS=8 AMP=true BATCH_SIZE=64 GRAD_ACCUMULATION_STEPS=1 bash scripts/train.sh "$@"
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2
tacotron2
decoderBuilderPlugins
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_DECODERBUILDERPLUGINS_H #define TT2I_DECODERBUILDERPLUGINS_H #include "IModelImporter.h" #include "trtPtr.h" #include "cuda_runtime.h" #include <memory> #include <string> namespace nvinfer1 { class INetworkDefinition; class IBuilder; } // namespace nvinfer1 namespace tts { class DecoderBuilderPlugins { public: /** * @brief Create a new DecoderBuilderPlugins. * * @param numDim The number of dimensions of the input tensor. * @param numChannels The number of channels to be output. */ DecoderBuilderPlugins(int numDim, int numChannels); /** * @brief Build the ICudaEngine for the decoder. * * @param builder The engine builder. * @param importer The model weight importer. * @param maxBatchSize The maximum batch size to support. This must be 1. * @param minInputLength The minimum input length to support. * @param maxInputLength The maximum input length to support. * @param useFP16 Whether or not to allow FP16 usage in the build. * * @return The built engine. */ TRTPtr<nvinfer1::ICudaEngine> build( nvinfer1::IBuilder& builder, IModelImporter& importer, const int maxBatchSize, const int minInputLength, const int maxInputLength, const bool useFP16); private: int mNumEncodingDim; int mNumPrenetDim; int mNumAttentionRNNDim; int mNumAttentionDim; int mNumAttentionFilters; int mAttentionKernelSize; int mNumLSTMDim; int mNumChannels; }; } // namespace tts #endif
TensorFlow/Detection/SSD/models/research/object_detection
object_detection
README
# Tensorflow Object Detection API Creating accurate machine learning models capable of localizing and identifying multiple objects in a single image remains a core challenge in computer vision. The TensorFlow Object Detection API is an open source framework built on top of TensorFlow that makes it easy to construct, train and deploy object detection models. At Google we’ve certainly found this codebase to be useful for our computer vision needs, and we hope that you will as well. <p align="center"> <img src="g3doc/img/kites_detections_output.jpg" width=676 height=450> </p> Contributions to the codebase are welcome and we would love to hear back from you if you find this API useful. Finally if you use the Tensorflow Object Detection API for a research publication, please consider citing: ``` "Speed/accuracy trade-offs for modern convolutional object detectors." Huang J, Rathod V, Sun C, Zhu M, Korattikara A, Fathi A, Fischer I, Wojna Z, Song Y, Guadarrama S, Murphy K, CVPR 2017 ``` \[[link](https://arxiv.org/abs/1611.10012)\]\[[bibtex]( https://scholar.googleusercontent.com/scholar.bib?q=info:l291WsrB-hQJ:scholar.google.com/&output=citation&scisig=AAGBfm0AAAAAWUIIlnPZ_L9jxvPwcC49kDlELtaeIyU-&scisf=4&ct=citation&cd=-1&hl=en&scfhb=1)\] <p align="center"> <img src="g3doc/img/tf-od-api-logo.png" width=140 height=195> </p> ## Maintainers * Jonathan Huang, github: [jch1](https://github.com/jch1) * Vivek Rathod, github: [tombstone](https://github.com/tombstone) * Ronny Votel, github: [ronnyvotel](https://github.com/ronnyvotel) * Derek Chow, github: [derekjchow](https://github.com/derekjchow) * Chen Sun, github: [jesu9](https://github.com/jesu9) * Menglong Zhu, github: [dreamdragon](https://github.com/dreamdragon) * Alireza Fathi, github: [afathi3](https://github.com/afathi3) * Zhichao Lu, github: [pkulzc](https://github.com/pkulzc) ## Table of contents Setup: * <a href='g3doc/installation.md'>Installation</a><br> Quick Start: * <a href='object_detection_tutorial.ipynb'> Quick Start: Jupyter notebook for off-the-shelf inference</a><br> * <a href="g3doc/running_pets.md">Quick Start: Training a pet detector</a><br> Customizing a Pipeline: * <a href='g3doc/configuring_jobs.md'> Configuring an object detection pipeline</a><br> * <a href='g3doc/preparing_inputs.md'>Preparing inputs</a><br> Running: * <a href='g3doc/running_locally.md'>Running locally</a><br> * <a href='g3doc/running_on_cloud.md'>Running on the cloud</a><br> Extras: * <a href='g3doc/detection_model_zoo.md'>Tensorflow detection model zoo</a><br> * <a href='g3doc/exporting_models.md'> Exporting a trained model for inference</a><br> * <a href='g3doc/defining_your_own_model.md'> Defining your own model architecture</a><br> * <a href='g3doc/using_your_own_dataset.md'> Bringing in your own dataset</a><br> * <a href='g3doc/evaluation_protocols.md'> Supported object detection evaluation protocols</a><br> * <a href='g3doc/oid_inference_and_evaluation.md'> Inference and evaluation on the Open Images dataset</a><br> * <a href='g3doc/instance_segmentation.md'> Run an instance segmentation model</a><br> * <a href='g3doc/challenge_evaluation.md'> Run the evaluation for the Open Images Challenge 2018</a><br> * <a href='g3doc/tpu_compatibility.md'> TPU compatible detection pipelines</a><br> * <a href='g3doc/running_on_mobile_tensorflowlite.md'> Running object detection on mobile devices with TensorFlow Lite</a><br> ## Getting Help To get help with issues you may encounter using the Tensorflow Object Detection API, create a new question on [StackOverflow](https://stackoverflow.com/) with the tags "tensorflow" and "object-detection". Please report bugs (actually broken code, not usage questions) to the tensorflow/models GitHub [issue tracker](https://github.com/tensorflow/models/issues), prefixing the issue name with "object_detection". Please check [FAQ](g3doc/faq.md) for frequently asked questions before reporting an issue. ## Release information ### Sep 17, 2018 We have released Faster R-CNN detectors with ResNet-50 / ResNet-101 feature extractors trained on the [iNaturalist Species Detection Dataset](https://github.com/visipedia/inat_comp/blob/master/2017/README.md#bounding-boxes). The models are trained on the training split of the iNaturalist data for 4M iterations, they achieve 55% and 58% mean AP@.5 over 2854 classes respectively. For more details please refer to this [paper](https://arxiv.org/abs/1707.06642). <b>Thanks to contributors</b>: Chen Sun ### July 13, 2018 There are many new updates in this release, extending the functionality and capability of the API: * Moving from slim-based training to [Estimator](https://www.tensorflow.org/api_docs/python/tf/estimator/Estimator)-based training. * Support for [RetinaNet](https://arxiv.org/abs/1708.02002), and a [MobileNet](https://ai.googleblog.com/2017/06/mobilenets-open-source-models-for.html) adaptation of RetinaNet. * A novel SSD-based architecture called the [Pooling Pyramid Network](https://arxiv.org/abs/1807.03284) (PPN). * Releasing several [TPU](https://cloud.google.com/tpu/)-compatible models. These can be found in the `samples/configs/` directory with a comment in the pipeline configuration files indicating TPU compatibility. * Support for quantized training. * Updated documentation for new binaries, Cloud training, and [Tensorflow Lite](https://www.tensorflow.org/mobile/tflite/). See also our [expanded announcement blogpost](https://ai.googleblog.com/2018/07/accelerated-training-and-inference-with.html) and accompanying tutorial at the [TensorFlow blog](https://medium.com/tensorflow/training-and-serving-a-realtime-mobile-object-detector-in-30-minutes-with-cloud-tpus-b78971cf1193). <b>Thanks to contributors</b>: Sara Robinson, Aakanksha Chowdhery, Derek Chow, Pengchong Jin, Jonathan Huang, Vivek Rathod, Zhichao Lu, Ronny Votel ### June 25, 2018 Additional evaluation tools for the [Open Images Challenge 2018](https://storage.googleapis.com/openimages/web/challenge.html) are out. Check out our short tutorial on data preparation and running evaluation [here](g3doc/challenge_evaluation.md)! <b>Thanks to contributors</b>: Alina Kuznetsova ### June 5, 2018 We have released the implementation of evaluation metrics for both tracks of the [Open Images Challenge 2018](https://storage.googleapis.com/openimages/web/challenge.html) as a part of the Object Detection API - see the [evaluation protocols](g3doc/evaluation_protocols.md) for more details. Additionally, we have released a tool for hierarchical labels expansion for the Open Images Challenge: check out [oid_hierarchical_labels_expansion.py](dataset_tools/oid_hierarchical_labels_expansion.py). <b>Thanks to contributors</b>: Alina Kuznetsova, Vittorio Ferrari, Jasper Uijlings ### April 30, 2018 We have released a Faster R-CNN detector with ResNet-101 feature extractor trained on [AVA](https://research.google.com/ava/) v2.1. Compared with other commonly used object detectors, it changes the action classification loss function to per-class Sigmoid loss to handle boxes with multiple labels. The model is trained on the training split of AVA v2.1 for 1.5M iterations, it achieves mean AP of 11.25% over 60 classes on the validation split of AVA v2.1. For more details please refer to this [paper](https://arxiv.org/abs/1705.08421). <b>Thanks to contributors</b>: Chen Sun, David Ross ### April 2, 2018 Supercharge your mobile phones with the next generation mobile object detector! We are adding support for MobileNet V2 with SSDLite presented in [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381). This model is 35% faster than Mobilenet V1 SSD on a Google Pixel phone CPU (200ms vs. 270ms) at the same accuracy. Along with the model definition, we are also releasing a model checkpoint trained on the COCO dataset. <b>Thanks to contributors</b>: Menglong Zhu, Mark Sandler, Zhichao Lu, Vivek Rathod, Jonathan Huang ### February 9, 2018 We now support instance segmentation!! In this API update we support a number of instance segmentation models similar to those discussed in the [Mask R-CNN paper](https://arxiv.org/abs/1703.06870). For further details refer to [our slides](http://presentations.cocodataset.org/Places17-GMRI.pdf) from the 2017 Coco + Places Workshop. Refer to the section on [Running an Instance Segmentation Model](g3doc/instance_segmentation.md) for instructions on how to configure a model that predicts masks in addition to object bounding boxes. <b>Thanks to contributors</b>: Alireza Fathi, Zhichao Lu, Vivek Rathod, Ronny Votel, Jonathan Huang ### November 17, 2017 As a part of the Open Images V3 release we have released: * An implementation of the Open Images evaluation metric and the [protocol](g3doc/evaluation_protocols.md#open-images). * Additional tools to separate inference of detection and evaluation (see [this tutorial](g3doc/oid_inference_and_evaluation.md)). * A new detection model trained on the Open Images V2 data release (see [Open Images model](g3doc/detection_model_zoo.md#open-images-models)). See more information on the [Open Images website](https://github.com/openimages/dataset)! <b>Thanks to contributors</b>: Stefan Popov, Alina Kuznetsova ### November 6, 2017 We have re-released faster versions of our (pre-trained) models in the <a href='g3doc/detection_model_zoo.md'>model zoo</a>. In addition to what was available before, we are also adding Faster R-CNN models trained on COCO with Inception V2 and Resnet-50 feature extractors, as well as a Faster R-CNN with Resnet-101 model trained on the KITTI dataset. <b>Thanks to contributors</b>: Jonathan Huang, Vivek Rathod, Derek Chow, Tal Remez, Chen Sun. ### October 31, 2017 We have released a new state-of-the-art model for object detection using the Faster-RCNN with the [NASNet-A image featurization](https://arxiv.org/abs/1707.07012). This model achieves mAP of 43.1% on the test-dev validation dataset for COCO, improving on the best available model in the zoo by 6% in terms of absolute mAP. <b>Thanks to contributors</b>: Barret Zoph, Vijay Vasudevan, Jonathon Shlens, Quoc Le ### August 11, 2017 We have released an update to the [Android Detect demo](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/android) which will now run models trained using the Tensorflow Object Detection API on an Android device. By default, it currently runs a frozen SSD w/Mobilenet detector trained on COCO, but we encourage you to try out other detection models! <b>Thanks to contributors</b>: Jonathan Huang, Andrew Harp ### June 15, 2017 In addition to our base Tensorflow detection model definitions, this release includes: * A selection of trainable detection models, including: * Single Shot Multibox Detector (SSD) with MobileNet, * SSD with Inception V2, * Region-Based Fully Convolutional Networks (R-FCN) with Resnet 101, * Faster RCNN with Resnet 101, * Faster RCNN with Inception Resnet v2 * Frozen weights (trained on the COCO dataset) for each of the above models to be used for out-of-the-box inference purposes. * A [Jupyter notebook](object_detection_tutorial.ipynb) for performing out-of-the-box inference with one of our released models * Convenient [local training](g3doc/running_locally.md) scripts as well as distributed training and evaluation pipelines via [Google Cloud](g3doc/running_on_cloud.md). <b>Thanks to contributors</b>: Jonathan Huang, Vivek Rathod, Derek Chow, Chen Sun, Menglong Zhu, Matthew Tang, Anoop Korattikara, Alireza Fathi, Ian Fischer, Zbigniew Wojna, Yang Song, Sergio Guadarrama, Jasper Uijlings, Viacheslav Kovalevskyi, Kevin Murphy
PyTorch/Forecasting/TFT/triton/runner
runner
start_NVIDIA-A30
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/bin/bash # Install Docker . /etc/os-release && \ curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \ echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \ curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey| apt-key add - && \ curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \ apt-get update && \ apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2 # Install packages pip install -r triton/runner/requirements.txt # Evaluate Runner python3 -m "triton.runner.__main__" \ --config-path "triton/runner/config_NVIDIA-A30.yaml" \ --device 0
PyTorch/Detection/Efficientdet/effdet/layers
layers
padding
""" Padding Helpers Hacked together by / Copyright 2020 Ross Wightman """ # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2019-2022 Ross Wightman # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import List, Tuple import torch.nn.functional as F # Calculate symmetric padding for a convolution def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> int: padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 return padding # Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution def get_same_padding(x: int, k: int, s: int, d: int): return max((math.ceil(x / s) - 1) * s + (k - 1) * d + 1 - x, 0) # Can SAME padding for given args be done statically? def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 # Dynamically pad input x with 'SAME' padding for conv with specified args def pad_same(x, k: List[int], s: List[int], d: List[int] = (1, 1), value: float = 0): ih, iw = x.size()[-2:] pad_h, pad_w = get_same_padding(ih, k[0], s[0], d[0]), get_same_padding(iw, k[1], s[1], d[1]) if pad_h > 0 or pad_w > 0: x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2], value=value) return x def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: dynamic = False if isinstance(padding, str): # for any string padding, the padding will be calculated for you, one of three ways padding = padding.lower() if padding == 'same': # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact if is_static_pad(kernel_size, **kwargs): # static case, no extra overhead padding = get_padding(kernel_size, **kwargs) else: # dynamic 'SAME' padding, has runtime/GPU memory overhead padding = 0 dynamic = True elif padding == 'valid': # 'VALID' padding, same as padding=0 padding = 0 else: # Default to PyTorch style 'same'-ish symmetric padding padding = get_padding(kernel_size, **kwargs) return padding, dynamic
PyTorch/Classification/ConvNets/efficientnet
efficientnet
README
# EfficientNet For PyTorch This repository provides a script and recipe to train the EfficientNet model to achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA. ## Table Of Contents * [Model overview](#model-overview) * [Default configuration](#default-configuration) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [Enabling TF32](#enabling-tf32) * [Quantization](#quantization) * [Quantization-aware training](#qat) * [Setup](#setup) * [Requirements](#requirements) * [Quick Start Guide](#quick-start-guide) * [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Command-line options](#command-line-options) * [Dataset guidelines](#dataset-guidelines) * [Training process](#training-process) * [Inference process](#inference-process) * [NGC pretrained weights](#ngc-pretrained-weights) * [QAT process](#qat-process) * [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Training accuracy: NVIDIA A100 (8x A100 80GB)](#training-accuracy-nvidia-a100-8x-a100-80gb) * [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb) * [Example plots](#example-plots) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA A100 (8x A100 80GB)](#training-performance-nvidia-a100-8x-a100-80gb) * [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb) * [Training performance: NVIDIA DGX-1 (8x V100 32GB)](#training-performance-nvidia-dgx-1-8x-v100-32gb) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA A100 (1x A100 80GB)](#inference-performance-nvidia-a100-1x-a100-80gb) * [Inference performance: NVIDIA V100 (1x V100 16GB)](#inference-performance-nvidia-v100-1x-v100-16gb) * [QAT results](#qat-results) * [QAT Training performance: NVIDIA DGX-1 (8x V100 32GB)](#qat-training-performance-nvidia-dgx-1-8x-v100-32gb)) * [QAT Inference accuracy](#qat-inference-accuracy) * [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview EfficientNet is an image classification model family. It was first described in [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946). The scripts provided enable you to train the EfficientNet-B0, EfficientNet-B4, EfficientNet-WideSE-B0 and, EfficientNet-WideSE-B4 models. EfficientNet-WideSE models use Squeeze-and-Excitation layers wider than original EfficientNet models, the width of SE module is proportional to the width of Depthwise Separable Convolutions instead of block width. WideSE models are slightly more accurate than original models. This model is trained with mixed precision using Tensor Cores on Volta and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results over 2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. We use [NHWC data layout](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) when training using Mixed Precision. ### Default configuration The following sections highlight the default configurations for the EfficientNet models. **Optimizer** This model uses RMSprop with the following hyperparameters: * Momentum (0.9) * Learning rate (LR): * 0.08 for 4096 batch size for B0 models * 0.16 for 4096 batch size for B4 models scale the learning rate. * Learning rate schedule - we use cosine LR schedule * We use linear warmup of the learning rate during the first 16 epochs * Weight decay (WD): * 1e-5 for B0 models * 5e-6 for B4 models * We do not apply WD on Batch Norm trainable parameters (gamma/bias) * Label smoothing = 0.1 * [MixUp](https://arxiv.org/pdf/1710.09412.pdf) = 0.2 * We train for 400 epochs **Optimizer for QAT** This model uses SGD optimizer for B0 models and RMSPROP optimizer alpha=0.853 epsilon=0.00422 for B4 models. Other hyperparameters we used are: * Momentum: * 0.89 for B0 models * 0.9 for B4 models * Learning rate (LR): * 0.0125 for 128 batch size for B0 models * 4.09e-06 for 32 batch size for B4 models scale the learning rate. * Learning rate schedule: * cosine LR schedule for B0 models * linear LR schedule for B4 models * Weight decay (WD): * 4.50e-05 for B0 models * 9.714e-04 for B4 models * We do not apply WD on Batch Norm trainable parameters (gamma/bias) * We train for: *10 epochs for B0 models *2 epochs for B4 models **Data augmentation** This model uses the following data augmentation: * For training: * Auto-augmentation * Basic augmentation: * Normalization * Random resized crop to target images size (depending on model version) * Scale from 8% to 100% * Aspect ratio from 3/4 to 4/3 * Random horizontal flip * For inference: * Normalization * Scale to target image size + 32 * Center crop to target image size ### Feature support matrix The following features are supported by this model: | Feature | EfficientNet |-----------------------|-------------------------- |[DALI](https://docs.nvidia.com/deeplearning/dali/release-notes/index.html) | Yes (without autoaugmentation) |[APEX AMP](https://nvidia.github.io/apex/amp.html) | Yes |[QAT](https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization) | Yes #### Features **NVIDIA DALI** DALI is a library accelerating data preparation pipeline. To accelerate your input pipeline, you only need to define your data loader with the DALI library. For more information about DALI, refer to the [DALI product documentation](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/index.html). We use [NVIDIA DALI](https://github.com/NVIDIA/DALI), which speeds up data loading when CPU becomes a bottleneck. DALI can use CPU or GPU, and outperforms the PyTorch native dataloader. Run training with `--data-backends dali-gpu` or `--data-backends dali-cpu` to enable DALI. For DGXA100 and DGX1 we recommend `--data-backends dali-cpu`. DALI currently does not support Autoaugmentation, so for best accuracy it has to be disabled. **[APEX](https://github.com/NVIDIA/apex)** A PyTorch extension that contains utility libraries, such as [Automatic Mixed Precision (AMP)](https://nvidia.github.io/apex/amp.html), which require minimal network code changes to leverage Tensor Cores performance. Refer to the [Enabling mixed precision](#enabling-mixed-precision) section for more details. **[QAT](https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization)** Quantization aware training (QAT) is a method for changing precision to INT8 which speeds up the inference process at the price of a slight decrease of network accuracy. Refer to the [Quantization](#quantization) section for more details. ### Mixed precision training Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in CUDA 8 in the NVIDIA Deep Learning SDK. For information about: - How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. - APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/). #### Enabling mixed precision Mixed precision is enabled in PyTorch by using the Automatic Mixed Precision (AMP), a library from [APEX](https://github.com/NVIDIA/apex) that casts variables to half-precision upon retrieval, while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In PyTorch, loss scaling can be easily applied by using `scale_loss()` method provided by AMP. The scaling value to be used can be [dynamic](https://nvidia.github.io/apex/fp16_utils.html#apex.fp16_utils.DynamicLossScaler) or fixed. For an in-depth walk through on AMP, check out sample usage [here](https://github.com/NVIDIA/apex/tree/master/apex/amp#usage-and-getting-started). [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains utility libraries, such as AMP, which require minimal network code changes to leverage Tensor Cores performance. To enable mixed precision, you can: - Import AMP from APEX: ```python from apex import amp ``` - Wrap model and optimizer in `amp.initialize`: ```python model, optimizer = amp.initialize(model, optimizer, opt_level="O1", loss_scale="dynamic") ``` - Scale loss before backpropagation: ```python with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() ``` #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ### Quantization Quantization is the process of transforming deep learning models to use parameters and computations at a lower precision. Traditionally, DNN training and inference have relied on the IEEE single-precision floating-point format, using 32 bits to represent the floating-point model weights and activation tensors. This compute budget may be acceptable at training as most DNNs are trained in data centers or in the cloud with NVIDIA V100 or A100 GPUs that have significantly large compute capability and much larger power budgets. However, during deployment, these models are most often required to run on devices with much smaller computing resources and lower power budgets at the edge. Running a DNN inference using the full 32-bit representation is not practical for real-time analysis given the compute, memory, and power constraints of the edge. To help reduce the compute budget, while not compromising on the structure and number of parameters in the model, you can run inference at a lower precision. Initially, quantized inferences were run at half-point precision with tensors and weights represented as 16-bit floating-point numbers. While this resulted in compute savings of about 1.2–1.5x, there was still some compute budget and memory bandwidth that could be leveraged. In lieu of this, models are now quantized to an even lower precision, with an 8-bit integer representation for weights and tensors. This results in a model that is 4x smaller in memory and about 2–4x faster in throughput. While 8-bit quantization is appealing to save compute and memory budgets, it is a lossy process. During quantization, a small range of floating-point numbers are squeezed to a fixed number of information buckets. This results in loss of information. The minute differences which could originally be resolved using 32-bit representations are now lost because they are quantized to the same bucket in 8-bit representations. This is similar to rounding errors that one encounters when representing fractional numbers as integers. To maintain accuracy during inferences at a lower precision, it is important to try and mitigate errors arising due to this loss of information. #### Quantization-aware training In QAT, the quantization error is considered when training the model. The training graph is modified to simulate the lower precision behavior in the forward pass of the training process. This introduces the quantization errors as part of the training loss, which the optimizer tries to minimize during the training. Thus, QAT helps in modeling the quantization errors during training and mitigates its effects on the accuracy of the model at deployment. However, the process of modifying the training graph to simulate lower precision behavior is intricate. To run QAT, it is necessary to insert FakeQuantization nodes for the weights of the DNN Layers and Quantize-Dequantize (QDQ) nodes to the intermediate activation tensors to compute their dynamic ranges. For more information, see this [Quantization paper](https://arxiv.org/abs/2004.09602) and [Quantization-Aware Training](https://docs.nvidia.com/deeplearning/frameworks/tf-trt-user-guide/index.html#quantization-training) documentation. Tutorial for `pytoch-quantization` library can be found here [`pytorch-quantization` tutorial](https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/tutorials/quant_resnet50.html). It is important to mention that EfficientNet is NN, which is hard to quantize because the activation function all across the network is the SiLU (called also the Swish), whose negative values lie in very short range, which introduce a large quantization error. More details can be found in Appendix D of the [Quantization paper](https://arxiv.org/abs/2004.09602). ## Setup The following section lists the requirements that you need to meet in order to start training the EfficientNet model. ### Requirements This repository contains Dockerfile which extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components: * [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) * [PyTorch 21.03-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch) or newer * Supported GPUs: * [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) * [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/) * [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning DGX Documentation: * [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) * [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry) * [Running PyTorch](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/running.html#running) To set up the required environment or create your own container, as an alternative to the use of the PyTorch NGC container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). ## Quick Start Guide To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the efficientnet model on the ImageNet dataset. For the specifics concerning training and inference, see the [Advanced](#advanced) section. 1. Clone the repository. ``` git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/PyTorch/Classification/ ``` 2. Download and pre-process the dataset. The EfficientNet script operates on ImageNet 1k, a widely popular image classification dataset from the ILSVRC challenge. PyTorch can work directly on JPEGs, therefore, pre-processing/augmentation is not needed. 3. [Download the images](http://image-net.org/download-images). 4. Extract the training data: ```bash mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train tar -xvf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done cd .. ``` 5. Extract the validation data and move the images to subfolders: ```bash mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash ``` The directory in which the `train/` and `val/` directories are placed, is referred to as `<path to imagenet>` in this document. 6. Build the EfficientNet PyTorch NGC container. ``` docker build . -t nvidia_efficientnet ``` 7. Start an interactive session in the NGC container to run training/inference. ``` nvidia-docker run --rm -it -v <path to imagenet>:/imagenet --ipc=host nvidia_efficientnet ``` 8. Start training To run training for a standard configuration (DGX A100/DGX-1V, AMP/TF32/FP32, 400 Epochs), run one of the scripts in the `./efficientnet/training` directory called `./efficientnet/training/{AMP, TF32, FP32}/{ DGX A100, DGX-1V }_efficientnet-<version>_{AMP, TF32, FP32}_{ 400 }E.sh`. Ensure ImageNet is mounted in the `/imagenet` directory. For example: `bash ./efficientnet/training/AMP/DGXA100_efficientnet-b0_AMP.sh <path were to store checkpoints and logs>` 9. Start inference You can download pre-trained weights from NGC: ```bash wget --content-disposition -O unzip ``` To run inference on ImageNet, run: `python ./main.py --arch efficientnet-<version> --evaluate --epochs 1 --pretrained -b <batch size> <path to imagenet>` To run inference on JPEG image using pre-trained weights, run: `python classify.py --arch efficientnet-<version> --pretrained --precision AMP|FP32 --image <path to JPEG image>` ## Advanced The following sections provide greater details of the dataset, running training and inference, and the training results. ### Scripts and sample code For a non-standard configuration, run: * For 1 GPU * FP32 `python ./main.py --arch efficientnet-<version> --label-smoothing 0.1 <path to imagenet>` `python ./main.py --arch efficientnet-<version> --label-smoothing 0.1 --amp --static-loss-scale 256 <path to imagenet>` * For multiple GPUs * FP32 `python ./multiproc.py --nproc_per_node 8 ./main.py --arch efficientnet-<version> --label-smoothing 0.1 <path to imagenet>` * AMP `python ./multiproc.py --nproc_per_node 8 ./main.py --arch efficientnet-<version> --label-smoothing 0.1 --amp --static-loss-scale 256 <path to imagenet>` Use `python ./main.py -h` to obtain the list of available options in the `main.py` script. ### Command-line options To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example: `python main.py -h` ### Dataset guidelines To use your own dataset, divide it into directories. For example: - Training images - `train/<class id>/<image>` - Validation images - `val/<class id>/<image>` If your dataset has a number of classes different than 1000, you need to pass the `--num_classes N` flag to the training script. ### Training process All the results of the training will be stored in the directory specified with `--workspace` argument. The script will store: - the most recent checkpoint - `checkpoint.pth.tar` (unless `--no-checkpoints` flag is used). - the checkpoint with the best validation accuracy - `model_best.pth.tar` (unless `--no-checkpoints` flag is used). - the JSON log - in the file specified with the `--raport-file` flag. Metrics gathered through training: - `train.loss` - training loss - `train.total_ips` - training speed measured in images/second - `train.compute_ips` - training speed measured in images/second, not counting data loading - `train.data_time` - time spent on waiting on data - `train.compute_time` - time spent in forward/backward pass To restart training from the checkpoint use the `--resume` option. To start training from pretrained weights (for example, downloaded from NGC) use the `--pretrained-from-file` option. The difference between `--resume` and `--pretrained-from-file` flags is that the pretrained weights contain only model weights, and checkpoints, apart from model weights, contain optimizer state, LR scheduler state. Checkpoints are suitable for dividing the training into parts, for example, in order to divide the training job into shorter stages, or restart training after an infrastructure failure. Pretrained weights can be used as a base for fine tuning the model to a different dataset, or as a backbone to detection models. ### Inference process Validation is done every epoch, and can be also run separately on a checkpointed model. `python ./main.py --arch efficientnet-<version> --evaluate --epochs 1 --resume <path to checkpoint> -b <batch size> <path to imagenet>` Metrics gathered through training: - `val.loss` - validation loss - `val.top1` - validation top1 accuracy - `val.top5` - validation top5 accuracy - `val.total_ips` - inference speed measured in images/second - `val.compute_ips` - inference speed measured in images/second, not counting data loading - `val.data_time` - time spent on waiting on data - `val.compute_time` - time spent on inference To run inference on JPEG image, you have to first extract the model weights from checkpoint: `python checkpoint2model.py --checkpoint-path <path to checkpoint> --weight-path <path where weights will be stored>` Then, run the classification script: `python classify.py --arch efficientnet-<version> --pretrained-from-file <path to weights from previous step> --precision AMP|FP32 --image <path to JPEG image>` You can also run the ImageNet validation on pretrained weights: `python ./main.py --arch efficientnet-<version> --evaluate --epochs 1 --pretrained-from-file <path to pretrained weights> -b <batch size> <path to imagenet>` #### NGC pretrained weights Pretrained weights can be downloaded from NGC: ```bash wget <ngc weights url> ``` URL for each model can be found in the following table: | **Model** | **NGC weights URL** | |:---------:|:-------------------:| | efficientnet-b0 | https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b0_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-b0_210412.pth | | efficientnet-b4 | https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b4_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-b4_210412.pth | | efficientnet-widese-b0 | https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_widese_b0_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-widese-b0_210412.pth | | efficientnet-widese-b4 | https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_widese_b4_pyt_amp/versions/20.12.0/files/nvidia_efficientnet-widese-b4_210412.pth | | efficientnet-quant-b0 | https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b0_pyt_qat_ckpt_fp32/versions/21.03.0/files/nvidia-efficientnet-quant-b0-130421.pth | | efficientnet-quant-b4 | https://api.ngc.nvidia.com/v2/models/nvidia/efficientnet_b4_pyt_qat_ckpt_fp32/versions/21.03.0/files/nvidia-efficientnet-quant-b4-130421.pth | To run inference on ImageNet, run: `python ./main.py --arch efficientnet-<version> --evaluate --epochs 1 --pretrained -b <batch size> <path to imagenet>` To run inference on JPEG images using pretrained weights, run: `python classify.py --arch efficientnet-<version> --pretrained --precision AMP|FP32 --image <path to JPEG image>` ### Quantization process EfficientNet-b0 and EfficientNet-b4 models can be quantized using the QAT process from running the `quant_main.py` script. `python ./quant_main.py <path to imagenet> --arch efficientnet-quant-<version> --epochs <# of QAT epochs> --pretrained-from-file <path to non-quantized model weights> <any other parameters for training such as batch, momentum etc.>` During the QAT process, evaluation is done in the same way as during standard training. `quant_main.py` works in the same way as the original `main.py` script, but with quantized models. It means that `quant_main.py` can be used to resume the QAT process with the flag `--resume`: `python ./quant_main.py <path to imagenet> --arch efficientnet-quant-<version> --resume <path to mid-training checkpoint> ...` or to evaluate a created checkpoint with the flag `--evaluate`: `python ./quant_main.py --arch efficientnet-quant-<version> --evaluate --epochs 1 --resume <path to checkpoint> -b <batch size> <path to imagenet>` It also can run on multi-GPU in an identical way as the standard `main.py` script: `python ./multiproc.py --nproc_per_node 8 ./quant_main.py --arch efficientnet-quant-<version> ... <path to imagenet>` There is also a possibility to transform trained models (quantized or not) into ONNX format, which is needed to convert it later into TensorRT, where quantized networks are much faster during inference. Conversion to TensorRT will be supported in the next release. The conversion to ONNX consists of two steps: * translate checkpoint to pure weights: `python checkpoint2model.py --checkpoint-path <path to quant checkpoint> --weight-path <path where quant weights will be stored>` * translate pure weights to ONNX: `python model2onnx.py --arch efficientnet-quant-<version> --pretrained-from-file <path to model quant weights> -b <batch size> --trt True` Quantized models could also be used to classify new images using the `classify.py` flag. For example: `python classify.py --arch efficientnet-quant-<version> --pretrained-from-file <path to quant weights> --image <path to JPEG image>` ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Benchmarking The following section shows how to run benchmarks measuring the model performance in training and inference modes. #### Training performance benchmark To benchmark training, run: * For 1 GPU * FP32 (V100 GPUs only) `python ./launch.py --model efficientnet-<version> --precision FP32 --mode benchmark_training --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * TF32 (A100 GPUs only) `python ./launch.py --model efficientnet-<version> --precision TF32 --mode benchmark_training --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * AMP `python ./launch.py --model efficientnet-<version> --precision AMP --mode benchmark_training --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * For multiple GPUs * FP32 (V100 GPUs only) `python ./launch.py --model efficientnet-<version> --precision FP32 --mode benchmark_training --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * TF32 (A100 GPUs only) `python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-<version> --precision TF32 --mode benchmark_training --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * AMP `python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-<version> --precision AMP --mode benchmark_training --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` Each of these scripts will run 100 iterations and save results in the `benchmark.json` file. #### Inference performance benchmark To benchmark inference, run: * FP32 (V100 GPUs only) `python ./launch.py --model efficientnet-<version> --precision FP32 --mode benchmark_inference --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * TF32 (A100 GPUs only) `python ./launch.py --model efficientnet-<version> --precision TF32 --mode benchmark_inference --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * AMP `python ./launch.py --model efficientnet-<version> --precision AMP --mode benchmark_inference --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` Each of these scripts will run 100 iterations and save results in the `benchmark.json` file. ### Results Our results were obtained by running the applicable training script in the pytorch-21.03 NGC container. To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). #### Training accuracy results ##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB) Our results were obtained by running the applicable `efficientnet/training/<AMP|TF32>/*.sh` training script in the PyTorch 20.12 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs. | **Model** | **Epochs** | **GPUs** | **Top1 accuracy - TF32** | **Top1 accuracy - mixed precision** | **Time to train - TF32** | **Time to train - mixed precision** | **Time to train speedup (TF32 to mixed precision)** | |:----------------------:|:----------:|:--------:|:------------------------:|:-----------------------------------:|:------------------------:|:-----------------------------------:|:---------------------------------------------------:| | efficientnet-b0 | 400 | 8 | 77.16 +/- 0.07 | 77.42 +/- 0.11 | 19 | 11 | 1.727 | | efficientnet-b4 | 400 | 8 | 82.82 +/- 0.04 | 82.85 +/- 0.09 | 126 | 66 | 1.909 | | efficientnet-widese-b0 | 400 | 8 | 77.84 +/- 0.08 | 77.84 +/- 0.02 | 19 | 10 | 1.900 | | efficientnet-widese-b4 | 400 | 8 | 83.13 +/- 0.11 | 83.1 +/- 0.09 | 126 | 66 | 1.909 | ##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB) Our results were obtained by running the applicable `efficientnet/training/<AMP|FP32>/*.sh` training script in the PyTorch 20.12 NGC container on NVIDIA DGX-1 (8x V100 16GB) GPUs. | **Model** | **Epochs** | **GPUs** | **Top1 accuracy - FP32** | **Top1 accuracy - mixed precision** | **Time to train - FP32** | **Time to train - mixed precision** | **Time to train speedup (FP32 to mixed precision)** | |:----------------------:|:----------:|:--------:|:------------------------:|:-----------------------------------:|:------------------------:|:-----------------------------------:|:---------------------------------------------------:| | efficientnet-b0 | 400 | 8 | 77.02 +/- 0.04 | 77.17 +/- 0.08 | 34 | 24 | 1.417 | | efficientnet-widese-b0 | 400 | 8 | 77.59 +/- 0.16 | 77.69 +/- 0.12 | 35 | 24 | 1.458 | ##### Example plots The following images show an A100 run. ![ValidationLoss](./img/loss_plot.png) ![ValidationTop1](./img/top1_plot.png) ![ValidationTop5](./img/top5_plot.png) #### Training performance results ##### Training performance: NVIDIA A100 (8x A100 80GB) Our results were obtained by running the applicable `efficientnet/training/<AMP|TF32>/*.sh` training script in the PyTorch 21.03 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs. | **Model** | **GPUs** | **TF32** | **Throughput - mixed precision** | **Throughput speedup (TF32 to mixed precision)** | **TF32 Strong Scaling** | **Mixed Precision Strong Scaling** | |:----------------------:|:--------:|:-----------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:| | efficientnet-b0 | 1 | 1078 img/s | 2489 img/s | 2.3 x | 1.0 x | 1.0 x | | efficientnet-b0 | 8 | 8193 img/s | 16652 img/s | 2.03 x | 7.59 x | 6.68 x | | efficientnet-b0 | 16 | 16137 img/s | 29332 img/s | 1.81 x | 14.96 x | 11.78 x | | efficientnet-b4 | 1 | 157 img/s | 331 img/s | 2.1 x | 1.0 x | 1.0 x | | efficientnet-b4 | 8 | 1223 img/s | 2570 img/s | 2.1 x | 7.76 x | 7.75 x | | efficientnet-b4 | 16 | 2417 img/s | 4813 img/s | 1.99 x | 15.34 x | 14.51 x | | efficientnet-b4 | 32 | 4813 img/s | 9425 img/s | 1.95 x | 30.55 x | 28.42 x | | efficientnet-b4 | 64 | 9146 img/s | 18900 img/s | 2.06 x | 58.05 x | 57.0 x | | efficientnet-widese-b0 | 1 | 1078 img/s | 2512 img/s | 2.32 x | 1.0 x | 1.0 x | | efficientnet-widese-b0 | 8 | 8244 img/s | 16368 img/s | 1.98 x | 7.64 x | 6.51 x | | efficientnet-widese-b0 | 16 | 16062 img/s | 29798 img/s | 1.85 x | 14.89 x | 11.86 x | | efficientnet-widese-b4 | 1 | 157 img/s | 331 img/s | 2.1 x | 1.0 x | 1.0 x | | efficientnet-widese-b4 | 8 | 1223 img/s | 2585 img/s | 2.11 x | 7.77 x | 7.8 x | | efficientnet-widese-b4 | 16 | 2399 img/s | 5041 img/s | 2.1 x | 15.24 x | 15.21 x | | efficientnet-widese-b4 | 32 | 4616 img/s | 9379 img/s | 2.03 x | 29.32 x | 28.3 x | | efficientnet-widese-b4 | 64 | 9140 img/s | 18516 img/s | 2.02 x | 58.07 x | 55.88 x | ##### Training performance: NVIDIA DGX-1 (8x V100 16GB) Our results were obtained by running the applicable `efficientnet/training/<AMP|FP32>/*.sh` training script in the PyTorch 21.03 NGC container on NVIDIA DGX-1 (8x V100 16GB) GPUs. | **Model** | **GPUs** | **FP32** | **Throughput - mixed precision** | **Throughput speedup (FP32 to mixed precision)** | **FP32 Strong Scaling** | **Mixed Precision Strong Scaling** | |:----------------------:|:--------:|:----------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:| | efficientnet-b0 | 1 | 655 img/s | 1301 img/s | 1.98 x | 1.0 x | 1.0 x | | efficientnet-b0 | 8 | 4672 img/s | 7789 img/s | 1.66 x | 7.12 x | 5.98 x | | efficientnet-b4 | 1 | 83 img/s | 204 img/s | 2.46 x | 1.0 x | 1.0 x | | efficientnet-b4 | 8 | 616 img/s | 1366 img/s | 2.21 x | 7.41 x | 6.67 x | | efficientnet-widese-b0 | 1 | 655 img/s | 1299 img/s | 1.98 x | 1.0 x | 1.0 x | | efficientnet-widese-b0 | 8 | 4592 img/s | 7875 img/s | 1.71 x | 7.0 x | 6.05 x | | efficientnet-widese-b4 | 1 | 83 img/s | 204 img/s | 2.45 x | 1.0 x | 1.0 x | | efficientnet-widese-b4 | 8 | 612 img/s | 1356 img/s | 2.21 x | 7.34 x | 6.63 x | ##### Training performance: NVIDIA DGX-1 (8x V100 32GB) Our results were obtained by running the applicable `efficientnet/training/<AMP|FP32>/*.sh` training script in the PyTorch 21.03 NGC container on NVIDIA DGX-1 (8x V100 16GB) GPUs. | **Model** | **GPUs** | **FP32** | **Throughput - mixed precision** | **Throughput speedup (FP32 to mixed precision)** | **FP32 Strong Scaling** | **Mixed Precision Strong Scaling** | |:----------------------:|:--------:|:----------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:| | efficientnet-b0 | 1 | 646 img/s | 1401 img/s | 2.16 x | 1.0 x | 1.0 x | | efficientnet-b0 | 8 | 4937 img/s | 8615 img/s | 1.74 x | 7.63 x | 6.14 x | | efficientnet-b4 | 1 | 36 img/s | 89 img/s | 2.44 x | 1.0 x | 1.0 x | | efficientnet-b4 | 8 | 641 img/s | 1565 img/s | 2.44 x | 17.6 x | 17.57 x | | efficientnet-widese-b0 | 1 | 281 img/s | 603 img/s | 2.14 x | 1.0 x | 1.0 x | | efficientnet-widese-b0 | 8 | 4924 img/s | 8870 img/s | 1.8 x | 17.49 x | 14.7 x | | efficientnet-widese-b4 | 1 | 36 img/s | 89 img/s | 2.45 x | 1.0 x | 1.0 x | | efficientnet-widese-b4 | 8 | 639 img/s | 1556 img/s | 2.43 x | 17.61 x | 17.44 x | #### Inference performance results ##### Inference performance: NVIDIA A100 (1x A100 80GB) Our results were obtained by running the applicable `efficientnet/inference/<AMP|FP32>/*.sh` inference script in the PyTorch 21.03 NGC container on NVIDIA DGX-1 (8x V100 16GB) GPUs. ###### TF32 Inference Latency | **Model** | **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** | |:----------------------:|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:| | efficientnet-b0 | 1 | 130 img/s | 9.33 ms | 7.95 ms | 9.0 ms | | efficientnet-b0 | 2 | 262 img/s | 9.39 ms | 8.51 ms | 9.5 ms | | efficientnet-b0 | 4 | 503 img/s | 9.68 ms | 9.53 ms | 10.78 ms | | efficientnet-b0 | 8 | 1004 img/s | 9.85 ms | 9.89 ms | 11.49 ms | | efficientnet-b0 | 16 | 1880 img/s | 10.27 ms | 10.34 ms | 11.19 ms | | efficientnet-b0 | 32 | 3401 img/s | 11.46 ms | 12.51 ms | 14.39 ms | | efficientnet-b0 | 64 | 4656 img/s | 19.58 ms | 14.52 ms | 16.63 ms | | efficientnet-b0 | 128 | 5001 img/s | 31.03 ms | 25.72 ms | 28.34 ms | | efficientnet-b0 | 256 | 5154 img/s | 60.71 ms | 49.44 ms | 54.99 ms | | efficientnet-b4 | 1 | 69 img/s | 16.22 ms | 14.87 ms | 15.34 ms | | efficientnet-b4 | 2 | 133 img/s | 16.84 ms | 16.49 ms | 17.72 ms | | efficientnet-b4 | 4 | 259 img/s | 17.33 ms | 16.39 ms | 19.67 ms | | efficientnet-b4 | 8 | 491 img/s | 18.22 ms | 18.09 ms | 19.51 ms | | efficientnet-b4 | 16 | 606 img/s | 28.28 ms | 26.55 ms | 26.84 ms | | efficientnet-b4 | 32 | 651 img/s | 51.08 ms | 49.39 ms | 49.61 ms | | efficientnet-b4 | 64 | 684 img/s | 96.23 ms | 93.54 ms | 93.78 ms | | efficientnet-b4 | 128 | 700 img/s | 195.22 ms | 182.17 ms | 182.42 ms | | efficientnet-b4 | 256 | 702 img/s | 380.01 ms | 361.81 ms | 371.64 ms | | efficientnet-widese-b0 | 1 | 130 img/s | 9.49 ms | 8.76 ms | 9.68 ms | | efficientnet-widese-b0 | 2 | 265 img/s | 9.25 ms | 8.51 ms | 9.75 ms | | efficientnet-widese-b0 | 4 | 520 img/s | 9.42 ms | 8.67 ms | 9.97 ms | | efficientnet-widese-b0 | 8 | 996 img/s | 12.27 ms | 9.69 ms | 11.31 ms | | efficientnet-widese-b0 | 16 | 1916 img/s | 10.2 ms | 10.29 ms | 11.3 ms | | efficientnet-widese-b0 | 32 | 3293 img/s | 11.71 ms | 13.0 ms | 14.57 ms | | efficientnet-widese-b0 | 64 | 4639 img/s | 16.21 ms | 14.61 ms | 16.29 ms | | efficientnet-widese-b0 | 128 | 4997 img/s | 30.81 ms | 25.76 ms | 26.02 ms | | efficientnet-widese-b0 | 256 | 5166 img/s | 73.68 ms | 49.39 ms | 55.74 ms | | efficientnet-widese-b4 | 1 | 68 img/s | 16.41 ms | 15.14 ms | 16.59 ms | | efficientnet-widese-b4 | 2 | 135 img/s | 16.65 ms | 15.52 ms | 17.93 ms | | efficientnet-widese-b4 | 4 | 251 img/s | 17.74 ms | 17.29 ms | 20.47 ms | | efficientnet-widese-b4 | 8 | 501 img/s | 17.75 ms | 17.12 ms | 18.01 ms | | efficientnet-widese-b4 | 16 | 590 img/s | 28.94 ms | 27.29 ms | 27.81 ms | | efficientnet-widese-b4 | 32 | 651 img/s | 50.96 ms | 49.34 ms | 49.55 ms | | efficientnet-widese-b4 | 64 | 683 img/s | 99.28 ms | 93.65 ms | 93.88 ms | | efficientnet-widese-b4 | 128 | 700 img/s | 189.81 ms | 182.3 ms | 182.58 ms | | efficientnet-widese-b4 | 256 | 702 img/s | 379.36 ms | 361.84 ms | 366.05 ms | ###### Mixed Precision Inference Latency | **Model** | **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** | |:----------------------:|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:| | efficientnet-b0 | 1 | 105 img/s | 11.21 ms | 9.9 ms | 12.55 ms | | efficientnet-b0 | 2 | 214 img/s | 11.01 ms | 10.06 ms | 11.89 ms | | efficientnet-b0 | 4 | 412 img/s | 11.45 ms | 11.73 ms | 13.0 ms | | efficientnet-b0 | 8 | 803 img/s | 11.78 ms | 11.59 ms | 14.2 ms | | efficientnet-b0 | 16 | 1584 img/s | 11.89 ms | 11.9 ms | 13.63 ms | | efficientnet-b0 | 32 | 2915 img/s | 13.03 ms | 14.79 ms | 17.35 ms | | efficientnet-b0 | 64 | 6315 img/s | 12.71 ms | 13.59 ms | 15.27 ms | | efficientnet-b0 | 128 | 9311 img/s | 18.78 ms | 15.34 ms | 17.99 ms | | efficientnet-b0 | 256 | 10239 img/s | 39.05 ms | 24.97 ms | 29.24 ms | | efficientnet-b4 | 1 | 53 img/s | 20.45 ms | 19.06 ms | 20.36 ms | | efficientnet-b4 | 2 | 109 img/s | 20.01 ms | 19.74 ms | 21.5 ms | | efficientnet-b4 | 4 | 212 img/s | 20.6 ms | 19.88 ms | 22.37 ms | | efficientnet-b4 | 8 | 416 img/s | 21.02 ms | 21.46 ms | 24.82 ms | | efficientnet-b4 | 16 | 816 img/s | 21.53 ms | 22.91 ms | 26.06 ms | | efficientnet-b4 | 32 | 1208 img/s | 28.4 ms | 26.77 ms | 28.3 ms | | efficientnet-b4 | 64 | 1332 img/s | 50.55 ms | 48.23 ms | 48.49 ms | | efficientnet-b4 | 128 | 1418 img/s | 95.84 ms | 90.12 ms | 95.76 ms | | efficientnet-b4 | 256 | 1442 img/s | 191.48 ms | 176.19 ms | 189.04 ms | | efficientnet-widese-b0 | 1 | 104 img/s | 11.28 ms | 10.0 ms | 12.72 ms | | efficientnet-widese-b0 | 2 | 206 img/s | 11.41 ms | 10.65 ms | 12.72 ms | | efficientnet-widese-b0 | 4 | 426 img/s | 11.15 ms | 10.23 ms | 11.03 ms | | efficientnet-widese-b0 | 8 | 794 img/s | 11.9 ms | 12.68 ms | 14.17 ms | | efficientnet-widese-b0 | 16 | 1536 img/s | 12.32 ms | 13.22 ms | 14.57 ms | | efficientnet-widese-b0 | 32 | 2876 img/s | 14.12 ms | 14.45 ms | 16.23 ms | | efficientnet-widese-b0 | 64 | 6183 img/s | 13.02 ms | 14.19 ms | 16.68 ms | | efficientnet-widese-b0 | 128 | 9310 img/s | 20.06 ms | 15.24 ms | 17.84 ms | | efficientnet-widese-b0 | 256 | 10193 img/s | 36.07 ms | 25.13 ms | 34.22 ms | | efficientnet-widese-b4 | 1 | 53 img/s | 20.24 ms | 19.05 ms | 19.91 ms | | efficientnet-widese-b4 | 2 | 109 img/s | 20.98 ms | 19.24 ms | 22.58 ms | | efficientnet-widese-b4 | 4 | 213 img/s | 20.48 ms | 20.48 ms | 23.64 ms | | efficientnet-widese-b4 | 8 | 425 img/s | 20.57 ms | 20.26 ms | 22.44 ms | | efficientnet-widese-b4 | 16 | 800 img/s | 21.93 ms | 23.15 ms | 26.51 ms | | efficientnet-widese-b4 | 32 | 1201 img/s | 28.51 ms | 26.89 ms | 28.13 ms | | efficientnet-widese-b4 | 64 | 1322 img/s | 50.96 ms | 48.58 ms | 48.77 ms | | efficientnet-widese-b4 | 128 | 1417 img/s | 96.45 ms | 90.17 ms | 90.43 ms | | efficientnet-widese-b4 | 256 | 1439 img/s | 190.06 ms | 176.59 ms | 188.51 ms | ##### Inference performance: NVIDIA V100 (1x V100 16GB) Our results were obtained by running the applicable `efficientnet/inference/<AMP|FP32>/*.sh` inference script in the PyTorch 21.03 NGC container on NVIDIA DGX-1 (8x V100 16GB) GPUs. ###### FP32 Inference Latency | **Model** | **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** | |:----------------------:|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:| | efficientnet-b0 | 1 | 83 img/s | 13.15 ms | 13.23 ms | 14.11 ms | | efficientnet-b0 | 2 | 167 img/s | 13.17 ms | 13.46 ms | 14.39 ms | | efficientnet-b0 | 4 | 332 img/s | 13.25 ms | 13.29 ms | 14.85 ms | | efficientnet-b0 | 8 | 657 img/s | 13.42 ms | 13.86 ms | 15.77 ms | | efficientnet-b0 | 16 | 1289 img/s | 13.78 ms | 15.02 ms | 16.99 ms | | efficientnet-b0 | 32 | 2140 img/s | 16.46 ms | 18.92 ms | 22.2 ms | | efficientnet-b0 | 64 | 2743 img/s | 25.14 ms | 23.44 ms | 23.79 ms | | efficientnet-b0 | 128 | 2908 img/s | 48.03 ms | 43.98 ms | 45.36 ms | | efficientnet-b0 | 256 | 2968 img/s | 94.86 ms | 85.62 ms | 91.01 ms | | efficientnet-b4 | 1 | 45 img/s | 23.31 ms | 23.3 ms | 24.9 ms | | efficientnet-b4 | 2 | 87 img/s | 24.07 ms | 23.81 ms | 25.14 ms | | efficientnet-b4 | 4 | 160 img/s | 26.29 ms | 26.78 ms | 30.85 ms | | efficientnet-b4 | 8 | 316 img/s | 26.65 ms | 26.44 ms | 28.61 ms | | efficientnet-b4 | 16 | 341 img/s | 48.18 ms | 46.9 ms | 47.13 ms | | efficientnet-b4 | 32 | 365 img/s | 89.07 ms | 87.83 ms | 88.02 ms | | efficientnet-b4 | 64 | 374 img/s | 173.2 ms | 171.61 ms | 172.27 ms | | efficientnet-b4 | 128 | 376 img/s | 346.32 ms | 339.74 ms | 340.37 ms | | efficientnet-widese-b0 | 1 | 82 img/s | 13.37 ms | 12.95 ms | 13.89 ms | | efficientnet-widese-b0 | 2 | 168 img/s | 13.11 ms | 12.45 ms | 13.94 ms | | efficientnet-widese-b0 | 4 | 346 img/s | 12.73 ms | 12.22 ms | 12.95 ms | | efficientnet-widese-b0 | 8 | 674 img/s | 13.07 ms | 12.75 ms | 14.93 ms | | efficientnet-widese-b0 | 16 | 1235 img/s | 14.3 ms | 15.05 ms | 16.53 ms | | efficientnet-widese-b0 | 32 | 2194 img/s | 15.99 ms | 17.37 ms | 19.01 ms | | efficientnet-widese-b0 | 64 | 2747 img/s | 25.05 ms | 23.38 ms | 23.71 ms | | efficientnet-widese-b0 | 128 | 2906 img/s | 48.05 ms | 44.0 ms | 44.59 ms | | efficientnet-widese-b0 | 256 | 2962 img/s | 95.14 ms | 85.86 ms | 86.25 ms | | efficientnet-widese-b4 | 1 | 43 img/s | 24.28 ms | 25.24 ms | 27.36 ms | | efficientnet-widese-b4 | 2 | 87 img/s | 24.04 ms | 24.38 ms | 26.01 ms | | efficientnet-widese-b4 | 4 | 169 img/s | 24.96 ms | 25.8 ms | 27.14 ms | | efficientnet-widese-b4 | 8 | 307 img/s | 27.39 ms | 28.4 ms | 30.7 ms | | efficientnet-widese-b4 | 16 | 342 img/s | 48.05 ms | 46.74 ms | 46.9 ms | | efficientnet-widese-b4 | 32 | 363 img/s | 89.44 ms | 88.23 ms | 88.39 ms | | efficientnet-widese-b4 | 64 | 373 img/s | 173.47 ms | 172.01 ms | 172.36 ms | | efficientnet-widese-b4 | 128 | 376 img/s | 347.18 ms | 340.09 ms | 340.45 ms | ###### Mixed Precision Inference Latency | **Model** | **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** | |:----------------------:|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:| | efficientnet-b0 | 1 | 62 img/s | 17.19 ms | 18.01 ms | 18.63 ms | | efficientnet-b0 | 2 | 119 img/s | 17.96 ms | 18.3 ms | 19.95 ms | | efficientnet-b0 | 4 | 238 img/s | 17.9 ms | 17.8 ms | 19.13 ms | | efficientnet-b0 | 8 | 495 img/s | 17.38 ms | 18.34 ms | 19.29 ms | | efficientnet-b0 | 16 | 945 img/s | 18.23 ms | 19.42 ms | 21.58 ms | | efficientnet-b0 | 32 | 1784 img/s | 19.29 ms | 20.71 ms | 22.51 ms | | efficientnet-b0 | 64 | 3480 img/s | 20.34 ms | 22.22 ms | 24.62 ms | | efficientnet-b0 | 128 | 5759 img/s | 26.11 ms | 22.61 ms | 24.06 ms | | efficientnet-b0 | 256 | 6176 img/s | 49.36 ms | 41.18 ms | 43.5 ms | | efficientnet-b4 | 1 | 34 img/s | 30.28 ms | 30.2 ms | 32.24 ms | | efficientnet-b4 | 2 | 69 img/s | 30.12 ms | 30.02 ms | 31.92 ms | | efficientnet-b4 | 4 | 129 img/s | 32.08 ms | 33.29 ms | 34.74 ms | | efficientnet-b4 | 8 | 242 img/s | 34.43 ms | 37.34 ms | 41.08 ms | | efficientnet-b4 | 16 | 488 img/s | 34.12 ms | 36.13 ms | 39.39 ms | | efficientnet-b4 | 32 | 738 img/s | 44.67 ms | 44.85 ms | 47.86 ms | | efficientnet-b4 | 64 | 809 img/s | 80.93 ms | 79.19 ms | 79.42 ms | | efficientnet-b4 | 128 | 843 img/s | 156.42 ms | 152.17 ms | 152.76 ms | | efficientnet-b4 | 256 | 847 img/s | 311.03 ms | 301.44 ms | 302.48 ms | | efficientnet-widese-b0 | 1 | 64 img/s | 16.71 ms | 17.59 ms | 19.23 ms | | efficientnet-widese-b0 | 2 | 129 img/s | 16.63 ms | 16.1 ms | 17.34 ms | | efficientnet-widese-b0 | 4 | 238 img/s | 17.92 ms | 17.52 ms | 18.82 ms | | efficientnet-widese-b0 | 8 | 445 img/s | 19.24 ms | 19.53 ms | 20.4 ms | | efficientnet-widese-b0 | 16 | 936 img/s | 18.64 ms | 19.55 ms | 21.1 ms | | efficientnet-widese-b0 | 32 | 1818 img/s | 18.97 ms | 20.62 ms | 23.06 ms | | efficientnet-widese-b0 | 64 | 3572 img/s | 19.81 ms | 21.14 ms | 23.29 ms | | efficientnet-widese-b0 | 128 | 5748 img/s | 26.18 ms | 23.72 ms | 26.1 ms | | efficientnet-widese-b0 | 256 | 6187 img/s | 49.11 ms | 41.11 ms | 41.59 ms | | efficientnet-widese-b4 | 1 | 32 img/s | 32.1 ms | 31.6 ms | 34.69 ms | | efficientnet-widese-b4 | 2 | 68 img/s | 30.4 ms | 30.9 ms | 32.67 ms | | efficientnet-widese-b4 | 4 | 123 img/s | 33.81 ms | 39.0 ms | 40.76 ms | | efficientnet-widese-b4 | 8 | 257 img/s | 32.34 ms | 33.39 ms | 34.93 ms | | efficientnet-widese-b4 | 16 | 497 img/s | 33.51 ms | 34.92 ms | 37.24 ms | | efficientnet-widese-b4 | 32 | 739 img/s | 44.63 ms | 43.62 ms | 46.39 ms | | efficientnet-widese-b4 | 64 | 808 img/s | 81.08 ms | 79.43 ms | 79.59 ms | | efficientnet-widese-b4 | 128 | 840 img/s | 157.11 ms | 152.87 ms | 153.26 ms | | efficientnet-widese-b4 | 256 | 846 img/s | 310.73 ms | 301.68 ms | 302.9 ms | #### Quantization results ##### QAT Training performance: NVIDIA DGX-1 (8x V100 32GB) | **Model** | **GPUs** | **Calibration** | **QAT model** | **FP32** | **QAT ratio** | |:---------------------:|:---------|:---------------:|:---------------:|:----------:|:-------------:| | efficientnet-quant-b0 | 8 | 14.71 img/s | 2644.62 img/s | 3798 img/s | 0.696 x | | efficientnet-quant-b4 | 8 | 1.85 img/s | 310.41 img/s | 666 img/s | 0.466 x | ###### Quant Inference accuracy The best checkpoints generated during training were used as a base for the QAT. | **Model** | **QAT Epochs** | **QAT Top1** | **Gap between FP32 Top1 and QAT Top1** | |:---------------------:|:--------------:|:------------:|:--------------------------------------:| | efficientnet-quant-b0 | 10 | 77.12 | 0.51 | | efficientnet-quant-b4 | 2 | 82.54 | 0.44 | ## Release notes ### Changelog 1. April 2020 * Initial release ### Known issues There are no known issues with this model.
TensorFlow2/Recommendation/DLRM_and_DCNv2/nn
nn
interaction
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the 'License'); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an 'AS IS' BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import tensorflow as tf class DotInteractionGather(tf.keras.layers.Layer): def __init__(self, num_features): super(DotInteractionGather, self).__init__() self.num_features = num_features self.indices = [] for i in range(self.num_features): for j in range(i): self.indices.append(i * num_features + j) def call(self, features, bottom_mlp_out=None): interactions = tf.matmul(features, features, transpose_b=True) interactions = tf.reshape(interactions, shape=[-1, self.num_features * self.num_features]) x = tf.gather(params=interactions, indices=self.indices, axis=1) if bottom_mlp_out is not None: x = tf.concat([bottom_mlp_out, x], axis=1) return x
PyTorch/SpeechSynthesis/HiFiGAN/common
common
tb_dllogger
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import atexit import glob import re from pathlib import Path import numpy as np import torch from torch.utils.tensorboard import SummaryWriter import dllogger from common.utils import plot_spectrogram tb_loggers = {} class TBLogger: """ xyz_dummies: stretch the screen with empty plots so the legend would always fit for other plots """ def __init__(self, enabled, log_dir, name, interval=1, dummies=True): self.enabled = enabled self.interval = interval self.cache = {} if self.enabled: self.summary_writer = SummaryWriter( log_dir=Path(log_dir, name), flush_secs=120, max_queue=200) atexit.register(self.summary_writer.close) if dummies: for key in ('_', '✕'): self.summary_writer.add_scalar(key, 0.0, 1) def log(self, step, data): for k, v in data.items(): self.log_value(step, k, v.item() if type(v) is torch.Tensor else v) def log_value(self, step, key, val, stat='mean'): if self.enabled: if key not in self.cache: self.cache[key] = [] self.cache[key].append(val) if len(self.cache[key]) == self.interval: agg_val = getattr(np, stat)(self.cache[key]) self.summary_writer.add_scalar(key, agg_val, step) del self.cache[key] def log_grads(self, step, model): if self.enabled: norms = [p.grad.norm().item() for p in model.parameters() if p.grad is not None] for stat in ('max', 'min', 'mean'): self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms), stat=stat) def log_samples(self, step, sample_ind, audio, spec, rate): if self.enabled: log_prefix = 'gt/y' if step == 0 else 'generated/y_hat' self.summary_writer.add_audio( f'{log_prefix}_{sample_ind}', audio[0], step, rate) self.summary_writer.add_figure( f'{log_prefix}_spec_{sample_ind}', plot_spectrogram(spec[0].cpu().numpy()), step) def unique_log_fpath(fpath): """Have a unique log filename for every separate run""" log_num = max([0] + [int(re.search("\.(\d+)", Path(f).suffix).group(1)) for f in glob.glob(f"{fpath}.*")]) return f"{fpath}.{log_num + 1}" def stdout_step_format(step): if isinstance(step, str): return step fields = [] if len(step) > 0: fields.append("epoch {:>4}".format(step[0])) if len(step) > 1: fields.append("iter {:>3}".format(step[1])) if len(step) > 2: fields[-1] += "/{}".format(step[2]) return " | ".join(fields) def stdout_metric_format(metric, metadata, value): name = metadata.get("name", metric + " : ") unit = metadata.get("unit", None) format = f'{{{metadata.get("format", "")}}}' fields = [name, format.format(value) if value is not None else value, unit] fields = [f for f in fields if f is not None] return "| " + " ".join(fields) def log(when, metrics={}, scope='train', flush_log=False, tb_iter=None): dllogger.log(when, data=metrics.get_metrics(scope, 'dll')) if tb_iter is not None: tb_loggers[scope].log(tb_iter, metrics.get_metrics(scope, 'tb')) if flush_log: flush() def log_grads_tb(tb_total_steps, grads, tb_subset='train'): tb_loggers[tb_subset].log_grads(tb_total_steps, grads) def log_samples_tb(tb_total_steps, sample_i, y, y_spec, rate, tb_subset='val',): tb_loggers[tb_subset].log_samples(tb_total_steps, sample_i, y, y_spec, rate) def parameters(data, verbosity=0, tb_subset=None): for k, v in data.items(): dllogger.log(step="PARAMETER", data={k: v}, verbosity=verbosity) if tb_subset is not None and tb_loggers[tb_subset].enabled: tb_data = {k: v for k, v in data.items() if type(v) in (str, bool, int, float)} tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {}) def flush(): dllogger.flush() for tbl in tb_loggers.values(): if tbl.enabled: tbl.summary_writer.flush()
PyTorch/Recommendation/DLRM/dlrm/cuda_src/dot_based_interact
dot_based_interact
dot_based_interact_tf32_bwd
#include <cuda.h> #include <cuda_fp16.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <mma.h> #include <cuda_fp16.hpp> #include <math.h> #include <fstream> #include <iomanip> #include <iostream> #include <vector> #include <ATen/cuda/CUDAContext.h> #include <torch/extension.h> #include "shared_utils.cuh" using namespace nvcuda; template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint ROW_TILES_PER_STEP, uint COL_TILES_PER_STEP, uint WARP_SIZE, uint WARP_SIZE_LOG_2, uint TILE_DIM, uint TILE_DIM_LOG_2> __launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractTF32BwdKernelNonAligned_(const __half *__restrict input, const __half *__restrict upstream_grad, half __restrict *grad, half __restrict *bottom_mlp_grad, uint batch_size, uint num_rows, uint num_cols, uint num_rows_after_padding, uint num_cols_after_padding, uint sample_size, uint interaction_ugrad_size, uint interaction_ugrad_size_with_padding, uint interaction_ugrad_2D_size_elems, uint interaction_ugrad_2D_stride, uint input_size_elems, uint input_stride, uint num_row_steps, uint num_col_steps, uint row_tiles_per_step, uint shared_mem_per_warp_size_byte) { extern __shared__ half shared_mem[]; uint warp_id = (threadIdx.x >> WARP_SIZE_LOG_2); uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id; if (sample_id >= batch_size) { return; } uint lane_id = threadIdx.x & (WARP_SIZE - 1); // ">> 1" to convert to half pointer uint smem_warp_offset = warp_id * (shared_mem_per_warp_size_byte >> 1); half *smem_in = &shared_mem[smem_warp_offset]; half *smem_temp = &shared_mem[smem_warp_offset + input_size_elems]; float *smem_out = reinterpret_cast<float *>(smem_temp); // Global memory pointers for the current sample // Input uint gmem_input_sample_offset = sample_id * sample_size; const half *gmem_input = &input[gmem_input_sample_offset]; // Interaction Gradient const uint &gmem_grad_sample_offset = gmem_input_sample_offset; half *gmem_grad = &grad[gmem_grad_sample_offset]; // Bottom MLP gradient half *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols]; // Upstream gradient vector uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding); const half *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset]; // Upstream gradient vector for interactions const half *gmem_ugrad_interactions = &gmem_ugrad[num_cols]; // upstream grad -> shared memory (place in input section temporarily) #pragma unroll for (uint idx = lane_id; idx < interaction_ugrad_size; idx += WARP_SIZE) { smem_in[idx] = gmem_ugrad_interactions[idx]; } __syncwarp(); // Form the 2D ugrad matrix. if (lane_id < num_rows_after_padding) { uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1); uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride; for (uint row = 0; row < num_rows; row++) { half ugrad_val = __float2half(0.0f); if (row < lane_id && lane_id < num_rows) { ugrad_val = smem_in[ugrad_flat_index + row]; smem_temp[ugrad_offset_1 + row] = ugrad_val; } if (row <= lane_id && lane_id < num_rows_after_padding) { smem_temp[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val; } } for (uint row = num_rows; row < num_rows_after_padding; row++) { smem_temp[row * interaction_ugrad_2D_stride + lane_id] = __float2half(0.0f); } } __syncwarp(); // Input -> Shared Memory for (uint row = 0; row < num_rows; row++) { half *smem_row_ptr = &smem_in[row * input_stride]; const half *gmem_row_ptr = &gmem_input[row * num_cols]; for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) { smem_row_ptr[idx] = gmem_row_ptr[idx]; } uint idx = lane_id + num_cols; if (idx < num_cols_after_padding) { smem_row_ptr[idx] = __float2half(0); } } #pragma unroll 2 for (uint row = num_rows; row < num_rows_after_padding; row++) { half *smem_row_ptr = &smem_in[row * input_stride]; for (uint idx = lane_id; idx < num_cols_after_padding; idx += WARP_SIZE) { smem_row_ptr[idx] = __float2half(0); } } __syncwarp(); wmma::fragment<wmma::matrix_a, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> a[ROW_TILES_PER_STEP] [ROW_TILES_PER_STEP]; for (uint i = 0; i < ROW_TILES_PER_STEP; i++) { for (uint j = 0; j < ROW_TILES_PER_STEP; j++) { const half *tile_ptr = smem_temp + ((i * interaction_ugrad_2D_stride + j) << TILE_DIM_LOG_2); wmma::load_matrix_sync(a[i][j], tile_ptr, interaction_ugrad_2D_stride); } } wmma::fragment<wmma::accumulator, TILE_DIM, TILE_DIM, TILE_DIM, float> acc[ROW_TILES_PER_STEP]; wmma::fragment<wmma::matrix_b, TILE_DIM, TILE_DIM, TILE_DIM, half, wmma::row_major> b[ROW_TILES_PER_STEP]; for (int col_step = 0; col_step < num_col_steps; col_step++) { for (uint i = 0; i < ROW_TILES_PER_STEP; i++) { const half *tile_ptr = smem_in + ((i * input_stride + col_step) << TILE_DIM_LOG_2); wmma::fill_fragment(acc[i], 0); wmma::load_matrix_sync(b[i], tile_ptr, input_stride); } for (uint i = 0; i < ROW_TILES_PER_STEP; i++) { for (uint j = 0; j < ROW_TILES_PER_STEP; j++) { wmma::mma_sync(acc[i], a[i][j], b[j], acc[i]); } } for (uint i = 0; i < ROW_TILES_PER_STEP; i++) { float *tile_ptr = smem_out + i * TILE_DIM * TILE_DIM; wmma::store_matrix_sync(tile_ptr, acc[i], TILE_DIM, wmma::mem_row_major); } __syncwarp(); uint gmem_grad_col = (col_step << TILE_DIM_LOG_2) + lane_id; if (gmem_grad_col < num_cols) { for (uint i = 0; i < num_rows; i++) { gmem_grad[i * num_cols + gmem_grad_col] = __float2half(smem_out[(i << TILE_DIM_LOG_2) + lane_id]); } } } for (uint idx = lane_id; idx < num_cols; idx += WARP_SIZE) { gmem_mlp_grad[idx] = gmem_ugrad[idx]; } } template <uint WARPS_PER_BLOCK, uint THREADBLOCK_SIZE, uint WARP_SIZE, uint WARP_SIZE_LOG_2, uint FRAG_A_ROWS, uint FRAG_B_COLS, uint TILE_LENGTH, uint TILE_LENGTH_LOG_2, uint TILE_WIDTH, uint TILE_WIDTH_LOG_2> __launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractTF32BwdKernel(const float *__restrict input, const float *__restrict upstream_grad, float *__restrict grad, float *__restrict bottom_mlp_grad, uint batch_size, uint num_rows, uint num_cols, uint num_rows_after_padding, uint num_cols_after_padding, uint sample_size, uint interaction_ugrad_size, uint interaction_ugrad_size_with_padding, uint interaction_ugrad_2D_size_elems, uint interaction_ugrad_2D_stride, uint input_size_elems, uint input_stride, uint shared_mem_per_warp_size_elems, uint num_k_steps, uint num_n_steps) { // The only support sizes for TF32. const uint kWmmaM = 16; const uint kWmmaN = 16; const uint kWmmaK = 8; extern __shared__ float shared_mem_float[]; uint warp_id = threadIdx.x >> WARP_SIZE_LOG_2; uint sample_id = blockIdx.x * WARPS_PER_BLOCK + warp_id; if (sample_id >= batch_size) { return; } uint lane_id = threadIdx.x & (WARP_SIZE - 1); uint smem_warp_offset = warp_id * shared_mem_per_warp_size_elems; float *smem_in = &shared_mem_float[smem_warp_offset]; float *smem_ugrad = &shared_mem_float[smem_warp_offset + input_size_elems]; float *smem_out = &shared_mem_float[smem_warp_offset + input_size_elems + interaction_ugrad_2D_size_elems]; // Global memory pointers for the current sample // Input uint gmem_input_sample_offset = sample_id * sample_size; const float *gmem_input = &input[gmem_input_sample_offset]; // Interaction Gradient const uint &gmem_grad_sample_offset = gmem_input_sample_offset; float *gmem_grad = &grad[gmem_grad_sample_offset]; // Bottom MLP gradient float *gmem_mlp_grad = &bottom_mlp_grad[sample_id * num_cols]; // Upstream gradient vector uint gmem_ugrad_sample_offset = sample_id * (num_cols + interaction_ugrad_size_with_padding); const float *gmem_ugrad = &upstream_grad[gmem_ugrad_sample_offset]; // Upstream gradient vector for interactions const float *gmem_ugrad_interactions = &gmem_ugrad[num_cols]; // upstream grad -> shared memory (place in input section temporarily) #pragma unroll for (uint idx = lane_id; idx < (interaction_ugrad_size >> 2); idx += WARP_SIZE) { float4 tmp = ((float4 *)gmem_ugrad_interactions)[idx]; tmp.x = wmma::__float_to_tf32(tmp.x); tmp.y = wmma::__float_to_tf32(tmp.y); tmp.z = wmma::__float_to_tf32(tmp.z); tmp.w = wmma::__float_to_tf32(tmp.w); ((float4 *)smem_in)[idx] = tmp; } uint offset = (interaction_ugrad_size >> 2) << 2; for (uint idx = lane_id + offset; idx < interaction_ugrad_size; idx += WARP_SIZE) { smem_in[idx] = wmma::__float_to_tf32(gmem_ugrad_interactions[idx]); } __syncwarp(); float zero = wmma::__float_to_tf32(0.0f); float4 zero4; zero4.x = zero; zero4.y = zero; zero4.z = zero; zero4.w = zero; // Form the 2D ugrad matrix. if (lane_id < num_rows_after_padding) { uint ugrad_flat_index = ((lane_id * (lane_id - 1)) >> 1); uint ugrad_offset_1 = lane_id * interaction_ugrad_2D_stride; for (uint row = 0; row < num_rows; row++) { float ugrad_val = zero; if (row < lane_id && lane_id < num_rows) { ugrad_val = smem_in[ugrad_flat_index + row]; smem_ugrad[ugrad_offset_1 + row] = ugrad_val; } if (row <= lane_id && lane_id < num_rows_after_padding) { smem_ugrad[row * interaction_ugrad_2D_stride + lane_id] = ugrad_val; } } for (uint row = num_rows; row < num_rows_after_padding; row++) { smem_ugrad[row * interaction_ugrad_2D_stride + lane_id] = zero; } } __syncwarp(); // Input -> Shared Memory if (lane_id < (num_cols >> 2)) { for (uint row = 0; row < num_rows; row++) { float *smem_row_ptr = &smem_in[row * input_stride]; const float *gmem_row_ptr = &gmem_input[row * num_cols]; float4 tmp = ((float4 *)gmem_row_ptr)[lane_id]; tmp.x = wmma::__float_to_tf32(tmp.x); tmp.y = wmma::__float_to_tf32(tmp.y); tmp.z = wmma::__float_to_tf32(tmp.z); tmp.w = wmma::__float_to_tf32(tmp.w); ((float4 *)smem_row_ptr)[lane_id] = tmp; } } uint idx = lane_id + num_cols; if (idx < num_cols_after_padding) { for (uint row = 0; row < num_rows; row++) { float *smem_row_ptr = &smem_in[row * input_stride]; smem_row_ptr[idx] = zero; } } if (lane_id < (num_cols_after_padding >> 2)) { #pragma unroll 2 for (uint row = num_rows; row < num_rows_after_padding; row++) { float *smem_row_ptr = &smem_in[row * input_stride]; ((float4 *)smem_row_ptr)[lane_id] = zero4; } } __syncwarp(); wmma::fragment<wmma::matrix_a, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major> a[FRAG_A_ROWS]; wmma::fragment<wmma::matrix_b, kWmmaM, kWmmaN, kWmmaK, wmma::precision::tf32, wmma::row_major> b[FRAG_B_COLS]; wmma::fragment<wmma::accumulator, kWmmaM, kWmmaN, kWmmaK, float> acc[FRAG_A_ROWS][FRAG_B_COLS]; for (uint n = 0; n < num_n_steps; n++) { for (uint i = 0; i < FRAG_A_ROWS; i++) { for (uint j = 0; j < FRAG_B_COLS; j++) { wmma::fill_fragment(acc[i][j], zero); } } for (uint k = 0; k < num_k_steps; k++) { for (uint i = 0; i < FRAG_A_ROWS; i++) { const float *mat_a_tile_ptr = smem_ugrad + (i << TILE_LENGTH_LOG_2) * interaction_ugrad_2D_stride + (k << TILE_WIDTH_LOG_2); wmma::load_matrix_sync(a[i], mat_a_tile_ptr, interaction_ugrad_2D_stride); } for (uint j = 0; j < FRAG_B_COLS; j++) { const float *mat_b_tile_ptr = smem_in + (k << TILE_WIDTH_LOG_2) * input_stride + ((2 * n + j) << TILE_LENGTH_LOG_2); wmma::load_matrix_sync(b[j], mat_b_tile_ptr, input_stride); } for (uint i = 0; i < FRAG_A_ROWS; i++) { for (uint j = 0; j < FRAG_B_COLS; j++) { wmma::mma_sync(acc[i][j], a[i], b[j], acc[i][j]); } } } // __syncwarp(); ? uint out_stride = FRAG_B_COLS << TILE_LENGTH_LOG_2; for (uint i = 0; i < FRAG_A_ROWS; i++) { for (uint j = 0; j < FRAG_B_COLS; j++) { float *out_tile_ptr = smem_out + (i << TILE_LENGTH_LOG_2) * out_stride + (j << TILE_LENGTH_LOG_2); wmma::store_matrix_sync(out_tile_ptr, acc[i][j], out_stride, wmma::mem_row_major); } } uint gmem_grad_col = n * (FRAG_B_COLS << TILE_LENGTH_LOG_2) + lane_id; for (uint i = 0; i < num_rows; i++) { gmem_grad[i * num_cols + gmem_grad_col] = smem_out[i * out_stride + lane_id]; } } if (lane_id < (num_cols >> 2)) { ((float4 *)gmem_mlp_grad)[lane_id] = ((float4 *)gmem_ugrad)[lane_id]; } } inline void dotBasedInteractTF32Bwd(void *input, void *upstream_grad, void *grad, void *bottom_mlp_grad, uint batch_size, uint num_rows, uint num_cols) { // Fragment Settings const uint kFragARows = 2; const uint kFragBCols = 2; const uint kTileLength = 16; const uint kTileLengthLog2 = Log2<kTileLength>::value; const uint kTileWidth = 8; const uint kTileWidthLog2 = Log2<kTileWidth>::value; const uint kWarpSize = 32; const uint kWarpSizeLog2 = Log2<kWarpSize>::value; const uint kSkewFloat = 4; const uint kWarpsPerBlock = 1; const uint kWarpsPerBlockLog2 = Log2<kWarpsPerBlock>::value; const uint kNumThreads = kWarpsPerBlock * kWarpSize; // num tiles uint mat_a_num_row_tiles = (num_rows + kTileLength - 1) >> kTileLengthLog2; uint mat_a_num_col_tiles = (num_rows + kTileWidth - 1) >> kTileWidthLog2; const uint &mat_b_num_row_tiles = mat_a_num_col_tiles; uint mat_b_num_col_tiles = (num_cols + kTileLength - 1) >> kTileLengthLog2; // number of rows and columns after padding uint num_rows_after_padding = mat_a_num_row_tiles << kTileLengthLog2; uint num_cols_after_padding = mat_b_num_col_tiles << kTileLengthLog2; // 2D ugrad size and stride uint interaction_ugrad_2D_stride = num_rows_after_padding + kSkewFloat; uint interaction_ugrad_2D_size_elems = num_rows_after_padding * interaction_ugrad_2D_stride; // 1D ugrad size uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1; uint interaction_ugrad_size_with_padding = ((interaction_ugrad_size-1)/8 + 1)*8; // in_out place size and stride uint input_stride = num_cols_after_padding + kSkewFloat; uint input_size_elems = num_rows_after_padding * input_stride; // sample size uint sample_size = num_rows * num_cols; // output size uint output_size_elems = kTileLength * kTileLength * kFragARows * kFragBCols; // Shared memory size uint shared_mem_per_warp_size_elems = interaction_ugrad_2D_size_elems + input_size_elems + output_size_elems; uint shared_mem_size_elems = kWarpsPerBlock * shared_mem_per_warp_size_elems; uint shared_mem_size_bytes = shared_mem_size_elems * sizeof(float); uint num_blocks = (batch_size + kWarpsPerBlock - 1) >> kWarpsPerBlockLog2; uint num_k_steps = mat_a_num_col_tiles; uint num_n_steps = mat_b_num_col_tiles / kFragBCols; bool float4_predicate = !((interaction_ugrad_size_with_padding & 7) || (num_cols & 7)); if (float4_predicate) { dotBasedInteractTF32BwdKernel<kWarpsPerBlock, kNumThreads, kWarpSize, kWarpSizeLog2, kFragARows, kFragBCols, kTileLength, kTileLengthLog2, kTileWidth, kTileWidthLog2> <<<num_blocks, kNumThreads, shared_mem_size_bytes, at::cuda::getCurrentCUDAStream()>>>((const float *)input, (const float *)upstream_grad, (float *)grad, (float *)bottom_mlp_grad, batch_size, num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size, interaction_ugrad_size, interaction_ugrad_size_with_padding, interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems, input_stride, shared_mem_per_warp_size_elems, num_k_steps, num_n_steps); } else { std::cout << "GENERIC VERSION IS UNFINISHED BACKWARD." << std::endl; #ifdef GENERIC_IS_DONE dotBasedInteractTF32BwdKernelNonAligned<kWarpsPerBlock, kNumThreads, kRowTilesPerStep, kColTilesPerStep, kWarpSize, kWarpSizeLog2, kTileDim, kTileDimLog2> <<<num_blocks, kNumThreads, shared_mem_size_bytes, at::cuda::getCurrentCUDAStream()>>>((const half *)input, (const half *)upstream_grad, (half *)grad, (half *)bottom_mlp_grad, batch_size, num_rows, num_cols, num_rows_after_padding, num_cols_after_padding, sample_size, interaction_ugrad_size, interaction_ugrad_size_with_padding, interaction_ugrad_2D_size_elems, interaction_ugrad_2D_stride, input_size_elems, input_stride, num_row_steps, num_col_steps, row_tiles_per_step, shared_mem_per_warp_size_byte); #endif } }
PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/perf_analyzer
perf_analyzer
runner
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import logging import os import pathlib import sys from distutils.version import LooseVersion from typing import Dict, List, Optional # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ...core import EvaluationMode, MeasurementMode, OfflineMode from ...report import save_results, show_results, sort_results from ...utils import log_dict, parse_server_url from .perf_analyzer import PerfAnalyzer from .perf_config import PerfAnalyzerConfig if LooseVersion(sys.version) >= LooseVersion("3.8.0"): from importlib.metadata import version TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient")) else: import pkg_resources TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version) LOGGER = logging.getLogger("triton_performance_runner.perf_analyzer") class PerfAnalyzerRunner: def __init__( self, server_url: str, model_name: str, input_data: str, input_shapes: List[str], batch_sizes: List[int], concurrency: List[int], measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, result_path: pathlib.Path, output_shared_memory_size: int = 102400, timeout: Optional[int] = None, verbose: bool = False, ): log_dict( "Selected configuration", { "server_url": server_url, "model_name": model_name, "input_data": input_data, "input_shapes": input_shapes, "batch_sizes": batch_sizes, "concurrency": concurrency, "measurement_mode": measurement_mode, "measurement_interval": measurement_interval, "measurement_request_count": measurement_request_count, "evaluation_mode": evaluation_mode, "offline_mode": offline_mode, "output_shared_memory_size": output_shared_memory_size, "result_path": result_path, "timeout": timeout, "verbose": verbose, }, ) if result_path.suffix != ".csv": raise ValueError( "Results path for Perf Analyzer is invalid. Please, provide the CSV file name. Example: results.csv" ) self._server_url = server_url self._model_name = model_name self._input_data = input_data self._input_shapes = input_shapes self._batch_sizes = batch_sizes self._concurrency = concurrency self._measurement_mode = measurement_mode self._measurement_interval = measurement_interval self._measurement_request_count = measurement_request_count self._evaluation_mode = evaluation_mode self._offline_mode = offline_mode self._result_path = result_path self._output_shared_memory_size = output_shared_memory_size self._timeout = timeout self._verbose = verbose self._protocol, self._host, self._port = parse_server_url(server_url) def run(self): results: List[Dict] = [] for batch_size in self._batch_sizes: for concurrency in self._concurrency: performance_partial_file = ( f"{self._evaluation_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv" ) params = { "model-name": self._model_name, "model-version": 1, "batch-size": batch_size, "url": f"{self._host}:{self._port}", "protocol": self._protocol.value, "input-data": self._input_data, "measurement-interval": self._measurement_interval, "concurrency-range": f"{concurrency}:{concurrency}:1", "latency-report-file": performance_partial_file, } if self._verbose: params["extra-verbose"] = True if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"): params["measurement-mode"] = self._measurement_mode.value params["measurement-request-count"] = self._measurement_request_count if self._evaluation_mode == EvaluationMode.OFFLINE: params["shared-memory"] = self._offline_mode.value params["output-shared-memory-size"] = self._output_shared_memory_size if self._verbose: log_dict( f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}", params ) config = PerfAnalyzerConfig() for param, value in params.items(): config[param] = value for shape in self._input_shapes: config["shape"] = shape perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout) perf_analyzer.run() self._update_performance_data(results, batch_size, performance_partial_file) os.remove(performance_partial_file) results = sort_results(results=results) save_results(filename=self._result_path.as_posix(), data=results) show_results(results=results) def _calculate_average_latency(self, r): avg_sum_fields = [ "Client Send", "Network+Server Send/Recv", "Server Queue", "Server Compute", "Server Compute Input", "Server Compute Infer", "Server Compute Output", "Client Recv", ] avg_latency = sum(int(r.get(f, 0)) for f in avg_sum_fields) return avg_latency def _update_performance_data(self, results: List, batch_size: int, performance_partial_file: str): row: Dict = {"Batch": batch_size} with open(performance_partial_file) as csvfile: reader = csv.DictReader(csvfile) for r in reader: avg_latency = self._calculate_average_latency(r) row = {**row, **r, "avg latency": avg_latency} results.append(row)
TensorFlow/Segmentation/VNet/examples
examples
vnet_benchmark
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess from os.path import dirname PARSER = argparse.ArgumentParser(description="vnet_benchmark") PARSER.add_argument('--data_dir', required=True, type=str) PARSER.add_argument('--model_dir', required=True, type=str) PARSER.add_argument('--mode', choices=['train', 'predict'], required=True, type=str) PARSER.add_argument('--gpus', choices=[1, 8], required=True, type=int) PARSER.add_argument('--batch_size', required=True, type=int) PARSER.add_argument('--amp', dest='use_amp', action='store_true', default=False) def build_horovod_prefix(gpus): return 'mpirun -np {} -H localhost:{} -bind-to none -map-by slot -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH -x PATH -mca ' \ 'pml ob1 -mca btl ^openib --allow-run-as-root '.format(gpus, gpus) def build_command(FLAGS, path_to_main, use_amp): return 'python {} --data_dir {} --model_dir {} --exec_mode {} --batch_size {} {} --augment --benchmark'.format( path_to_main, FLAGS.data_dir, FLAGS.model_dir, FLAGS.mode, FLAGS.batch_size, use_amp) def main(): FLAGS = PARSER.parse_args() use_amp = '--amp' if FLAGS.use_amp else '' path_to_main = os.path.join(dirname(dirname(os.path.realpath(__file__))), 'main.py') cmd = build_command(FLAGS, path_to_main, use_amp) if FLAGS.gpus > 1: assert FLAGS.mode != 'predict', 'Prediction can only be benchmarked on 1 GPU' cmd = build_horovod_prefix(FLAGS.gpus) + cmd print('Command to be executed:') print(cmd) subprocess.call(cmd, shell=True) if __name__ == '__main__': main()
TensorFlow/Recommendation/WideAndDeep/scripts
scripts
DGXA100_benchmark_training_tf32_1gpu
#!/bin/bash # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -x set -e python -m trainer.task \ --benchmark_warmup_steps 500 \ --benchmark_steps 1000 \ --gpu \ --benchmark
TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library
library
tf2onnx_conv
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import Iterable # pytype: disable=import-error import onnx import onnx.shape_inference import tensorflow as tf from tf2onnx import optimizer, tfonnx # pytype: enable=import-error from ..core import BaseConverter, Format, Model from ..extensions import converters from .tf import create_session_config def _replace_io_names(graph_proto, io_type, name2tensor): tensor2name = {v: k for k, v in name2tensor.items()} tensor_value_info_list = {"inputs": graph_proto.input, "outputs": graph_proto.output}[io_type] for tensor_value_info in tensor_value_info_list: old_name = tensor_value_info.name new_name = tensor2name.get(old_name) if new_name is not None and new_name != old_name: tensor_value_info.name = new_name # replace other graph nodes I/O for node in graph_proto.node: if old_name in node.input: idx = list(node.input).index(old_name) node.input[idx] = new_name if old_name in node.output: idx = list(node.output).index(old_name) node.output[idx] = new_name def tfgraph2onnx(graph_def, inputnames2tensornames, outputnames2tensornames, *, onnx_opset, onnx_optimized=True): with tf.Graph().as_default() as tf_graph: tf.import_graph_def(graph_def, name="") session_config = create_session_config(allow_growth=True) with tf.compat.v1.Session(graph=tf_graph, config=session_config): input_tensor_names = list(inputnames2tensornames.values()) output_tensor_names = list(outputnames2tensornames.values()) onnx_graph = tfonnx.process_tf_graph( tf_graph, input_names=input_tensor_names, output_names=output_tensor_names, opset=onnx_opset, ) if onnx_optimized: onnx_graph = optimizer.optimize_graph(onnx_graph) graph_doc: str = "triton export" onnx_model = onnx_graph.make_model(graph_doc) # to match tensorflow savedmodel signature _replace_io_names(onnx_model.graph, "inputs", inputnames2tensornames) _replace_io_names(onnx_model.graph, "outputs", outputnames2tensornames) onnx.checker.check_model(onnx_model) onnx.helper.strip_doc_string(onnx_model) onnx_model = onnx.shape_inference.infer_shapes(onnx_model) return onnx_model class TFGraphDef2ONNXConverter(BaseConverter): def __init__(self, *, onnx_opset: int, onnx_optimized: bool = True): self._onnx_opset = onnx_opset self._onnx_optimized = onnx_optimized def convert(self, model: Model, dataloader_fn) -> Model: assert isinstance(model.handle, tf.compat.v1.GraphDef) inputnames2tensorname = {name: spec.name for name, spec in model.inputs.items()} outputnames2tensorname = {name: spec.name for name, spec in model.outputs.items()} onnx_model = tfgraph2onnx( model.handle, inputnames2tensorname, outputnames2tensorname, onnx_opset=self._onnx_opset, onnx_optimized=self._onnx_optimized, ) from .onnx import _infer_graph_precision precision = _infer_graph_precision(onnx_model.graph) assert precision == model.precision # for testing precision inference function return model._replace(handle=onnx_model) converters.register_extension(f"{Format.TF_ESTIMATOR.value}--{Format.ONNX.value}", TFGraphDef2ONNXConverter) converters.register_extension(f"{Format.TF_KERAS.value}--{Format.ONNX.value}", TFGraphDef2ONNXConverter) converters.register_extension(f"{Format.TF_SAVEDMODEL.value}--{Format.ONNX.value}", TFGraphDef2ONNXConverter)
PyTorch/LanguageModeling/BART/bart/tokenization
tokenization
tokenization_mbart
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional from utils.file_utils import add_start_docstrings from bart.tokenization.tokenization_utils import BatchEncoding from bart.tokenization.tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING from bart.tokenization.tokenization_xlm_roberta import XLMRobertaTokenizer from utils import logging logger = logging.get_logger(__name__) _all_mbart_models = ["facebook/mbart-large-en-ro", "facebook/mbart-large-cc25"] SPM_URL = "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/mbart-large-en-ro/sentence.bpe.model" FAIRSEQ_LANGUAGE_CODES = [ "ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", ] class MBartTokenizer(XLMRobertaTokenizer): """ This inherits from XLMRobertaTokenizer. ``prepare_seq2seq_batch`` should be used to encode inputs. Other tokenizer methods like ``encode`` do not work properly. The tokenization method is ``<tokens> <eos> <language code>`` for source language documents, and ``<language code> <tokens> <eos>``` for target language documents. Examples:: >>> from transformers import MBartTokenizer >>> tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-en-ro') >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria" >>> batch: dict = tokenizer.prepare_seq2seq_batch( ... example_english_phrase, src_lang="en_XX", tgt_lang="ro_RO", tgt_texts=expected_translation_romanian ... ) """ vocab_files_names = {"vocab_file": "sentencepiece.bpe.model"} max_model_input_sizes = {m: 1024 for m in _all_mbart_models} pretrained_vocab_files_map = {"vocab_file": {m: SPM_URL for m in _all_mbart_models}} prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.sp_model_size = len(self.sp_model) self.lang_code_to_id = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES) } self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()} self.cur_lang_code = self.lang_code_to_id["en_XX"] self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} self._additional_special_tokens = list(self.lang_code_to_id.keys()) self.set_src_lang_special_tokens(kwargs.get("src_lang", "en_XX")) def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` methods. Args: token_ids_0 (:obj:`List[int]`): List of ids. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Set to True if the token list is already formatted with special tokens for the model Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formated with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] * len(self.suffix_tokens) if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. The special tokens depend on calling set_lang. An MBART sequence has the following format, where ``X`` represents the sequence: - ``input_ids`` (for encoder) ``X [eos, src_lang_code]`` - ``decoder_input_ids``: (for decoder) ``[tgt_lang_code] X [eos]`` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: list of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING) def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", max_length: Optional[int] = None, max_target_length: Optional[int] = None, truncation: bool = True, padding: str = "longest", return_tensors: str = "pt", **kwargs, ) -> BatchEncoding: """Prepare a batch that can be passed directly to an instance of MBartModel. Arguments: src_texts: (:obj:`list`): list of documents to summarize or source language texts src_lang: (:obj:`str`, `optional`, default='en_XX'): default en_XX (english), the language we are translating from tgt_texts: (:obj:`list`, `optional`): list of tgt language texts or summaries. tgt_lang: (:obj:`str`, `optional`, default='ro_RO'): default ro_RO (romanian), the language we are translating to max_length (:obj:`int`, `optional`): Controls the maximum length for encoder inputs (documents to summarize or source language texts) If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (:obj:`int`, `optional`): Controls the maximum length of decoder inputs (target language texts or summaries) If left unset or set to :obj:`None`, this will use the max_length value. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). Return: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **labels** -- List of token ids for tgt_texts The full set of keys ``[input_ids, attention_mask, decoder_input_ids, labels]``, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. """ if max_length is None: max_length = self.max_len self.set_src_lang_special_tokens(src_lang) model_inputs: BatchEncoding = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length self.set_tgt_lang_special_tokens(tgt_lang) labels = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=True, **kwargs, )["input_ids"] model_inputs["labels"] = labels self.set_src_lang_special_tokens(src_lang) # sets to src_lang return model_inputs def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, cur_lang_code].""" self.cur_lang_code = self.lang_code_to_id[src_lang] self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target language setting. Prefix [tgt_lang_code], suffix =[eos].""" self.cur_lang_code = self.lang_code_to_id[lang] self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
CUDA-Optimized/FastSpeech/fastspeech/utils
utils
fp16
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import torch import torch.nn as nn """ Revised based on apex/apex/amp/_initialize.py """ def _applier(value, fn): if isinstance(value, torch.cuda.FloatTensor): return fn(value) elif isinstance(value, torch.cuda.HalfTensor): return fn(value) elif isinstance(value, dict): return dict({k : _applier(v, fn) for k, v in value.items()}) elif isinstance(value, tuple): return tuple(_applier(v, fn) for v in value) else: return value def _cast_module_to_half(module, op_list): for op in op_list: if isinstance(module, op): module.half() module.register_forward_pre_hook(lambda module, input: _applier(input, lambda x: x.half())) module.register_forward_hook(lambda module, input, output: _applier(output, lambda x: x.float())) break else: for child in module.children(): _cast_module_to_half(child, op_list) return module def cast_model_to_half(model, op_list=[nn.Linear, nn.Conv1d]): model = _cast_module_to_half(model, op_list) return model
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/data_transformer
data_transformer
ctgan_data_transformer
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import namedtuple import cudf import numpy as np import pandas as pd from sklearn.mixture import BayesianGaussianMixture from syngen.generator.tabular.transforms import OneHotEncoding from syngen.generator.tabular.data_transformer.base_data_transformer import ( BaseDataTransformer, ) SpanInfo = namedtuple("SpanInfo", ["dim", "activation_fn"]) ColumnTransformInfo = namedtuple( "ColumnTransformInfo", [ "column_name", "column_type", "transform", "transform_aux", "output_info", "output_dimensions", ], ) class CTGANDataTransformer(BaseDataTransformer): """Data Transformer for CTGAN. Adopted from: https://github.com/sdv-dev/CTGAN Model continuous columns with a BayesianGMM and normalized to a scalar [0, 1] and a vector. Discrete columns are encoded using a scikit-learn OneHotEncoder. """ def __init__(self, max_clusters=10, weight_threshold=0.005): """Create a data transformer. Args: max_clusters (int): Maximum number of Gaussian distributions in Bayesian GMM. weight_threshold (float): Weight threshold for a Gaussian distribution to be kept. """ self._max_clusters = max_clusters self._weight_threshold = weight_threshold def _fit_continuous(self, column_name, raw_column_data): """Train Bayesian GMM for continuous column.""" gm = BayesianGaussianMixture( n_components=self._max_clusters, weight_concentration_prior_type="dirichlet_process", weight_concentration_prior=0.001, n_init=1, ) gm.fit(raw_column_data.reshape(-1, 1)) valid_component_indicator = gm.weights_ > self._weight_threshold num_components = valid_component_indicator.sum() return ColumnTransformInfo( column_name=column_name, column_type="continuous", transform=gm, transform_aux=valid_component_indicator, output_info=[ SpanInfo(1, "tanh"), SpanInfo(num_components, "softmax"), ], output_dimensions=1 + num_components, ) def _fit_discrete(self, column_name, raw_column_data): """Fit one hot encoder for discrete column.""" ohe = OneHotEncoding() ohe.fit(raw_column_data) num_categories = len(ohe.dummies) return ColumnTransformInfo( column_name=column_name, column_type="discrete", transform=ohe, transform_aux=None, output_info=[SpanInfo(num_categories, "softmax")], output_dimensions=num_categories, ) def get_metadata(self): if hasattr(self, "_column_transform_info_list"): return self._column_transform_info_list return [] def fit(self, raw_data, discrete_columns=tuple()): """Fit GMM for continuous columns and One hot encoder for discrete columns. This step also counts the #columns in matrix data, and span information. """ self.output_info_list = [] self.output_dimensions = 0 if not isinstance(raw_data, (pd.DataFrame, cudf.DataFrame)): self.dataframe = False raw_data = pd.DataFrame(raw_data) else: self.dataframe = True self._column_raw_dtypes = raw_data.dtypes self._column_transform_info_list = [] for column_name in raw_data.columns: raw_column_data = raw_data[column_name].values if not isinstance(raw_column_data, np.ndarray): raw_column_data = raw_column_data.get() # cupy to numpy if column_name in discrete_columns: column_transform_info = self._fit_discrete( column_name, raw_column_data ) else: column_transform_info = self._fit_continuous( column_name, raw_column_data ) self.output_info_list.append(column_transform_info.output_info) self.output_dimensions += column_transform_info.output_dimensions self._column_transform_info_list.append(column_transform_info) def _transform_continuous(self, column_transform_info, raw_column_data): gm = column_transform_info.transform valid_component_indicator = column_transform_info.transform_aux num_components = valid_component_indicator.sum() means = gm.means_.reshape((1, self._max_clusters)) stds = np.sqrt(gm.covariances_).reshape((1, self._max_clusters)) normalized_values = ((raw_column_data - means) / (4 * stds))[ :, valid_component_indicator ] component_probs = gm.predict_proba(raw_column_data)[ :, valid_component_indicator ] selected_component = np.zeros(len(raw_column_data), dtype="int") for i in range(len(raw_column_data)): component_porb_t = component_probs[i] + 1e-6 component_porb_t = component_porb_t / component_porb_t.sum() selected_component[i] = np.random.choice( np.arange(num_components), p=component_porb_t ) selected_normalized_value = normalized_values[ np.arange(len(raw_column_data)), selected_component ].reshape([-1, 1]) selected_normalized_value = np.clip( selected_normalized_value, -0.99, 0.99 ) selected_component_onehot = np.zeros_like(component_probs) selected_component_onehot[ np.arange(len(raw_column_data)), selected_component ] = 1 return [selected_normalized_value, selected_component_onehot] def _transform_discrete(self, column_transform_info, raw_column_data): ohe = column_transform_info.transform return [ohe.transform(raw_column_data)] def transform(self, raw_data): """Take raw data and output a matrix data.""" if not isinstance(raw_data, (pd.DataFrame, cudf.DataFrame)): raw_data = pd.DataFrame(raw_data) column_data_list = [] for column_transform_info in self._column_transform_info_list: column_data = raw_data[[column_transform_info.column_name]].values if not isinstance(column_data, np.ndarray): column_data = column_data.get() # cupy to numpy if column_transform_info.column_type == "continuous": column_data_list += self._transform_continuous( column_transform_info, column_data ) else: assert column_transform_info.column_type == "discrete" column_data_list += self._transform_discrete( column_transform_info, column_data ) return np.concatenate(column_data_list, axis=1).astype(float) def _inverse_transform_continuous( self, column_transform_info, column_data, sigmas, st ): gm = column_transform_info.transform valid_component_indicator = column_transform_info.transform_aux selected_normalized_value = column_data[:, 0] selected_component_probs = column_data[:, 1:] if sigmas is not None: sig = sigmas[st] selected_normalized_value = np.random.normal( selected_normalized_value, sig ) selected_normalized_value = np.clip(selected_normalized_value, -1, 1) component_probs = ( np.ones((len(column_data), self._max_clusters)) * -100 ) component_probs[ :, valid_component_indicator ] = selected_component_probs means = gm.means_.reshape([-1]) stds = np.sqrt(gm.covariances_).reshape([-1]) selected_component = np.argmax(component_probs, axis=1) std_t = stds[selected_component] mean_t = means[selected_component] column = selected_normalized_value * 4 * std_t + mean_t return column def _inverse_transform_discrete(self, column_transform_info, column_data): ohe = column_transform_info.transform return ohe.inverse_transform(column_data) def inverse_transform(self, data, sigmas=None): """Take matrix data and output raw data. Output uses the same type as input to the transform function. Either np array or pd dataframe. """ st = 0 recovered_column_data_list = [] column_names = [] for column_transform_info in self._column_transform_info_list: dim = column_transform_info.output_dimensions column_data = data[:, st : st + dim] if column_transform_info.column_type == "continuous": recovered_column_data = self._inverse_transform_continuous( column_transform_info, column_data, sigmas, st ) else: assert column_transform_info.column_type == "discrete" recovered_column_data = self._inverse_transform_discrete( column_transform_info, column_data ) recovered_column_data_list.append(recovered_column_data) column_names.append(column_transform_info.column_name) st += dim recovered_data = np.column_stack(recovered_column_data_list) recovered_data = pd.DataFrame( recovered_data, columns=column_names ).astype(self._column_raw_dtypes) if not self.dataframe: recovered_data = recovered_data.values return recovered_data def convert_column_name_value_to_id(self, column_name, value): discrete_counter = 0 column_id = 0 for column_transform_info in self._column_transform_info_list: if column_transform_info.column_name == column_name: break if column_transform_info.column_type == "discrete": discrete_counter += 1 column_id += 1 else: raise ValueError( f"The column_name `{column_name}` doesn't exist in the data." ) one_hot = column_transform_info.transform.transform(np.array([value]))[ 0 ] if sum(one_hot) == 0: raise ValueError( f"The value `{value}` doesn't exist in the column `{column_name}`." ) return { "discrete_column_id": discrete_counter, "column_id": column_id, "value_id": np.argmax(one_hot), }
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit
deployment_toolkit
report
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import re from typing import Dict, List from natsort import natsorted from tabulate import tabulate def sort_results(results: List): results = natsorted(results, key=lambda item: [item[key] for key in item.keys()]) return results def save_results(filename: str, data: List, formatted: bool = False): data = format_data(data=data) if formatted else data with open(filename, "a") as csvfile: fieldnames = data[0].keys() writer = csv.DictWriter(csvfile, fieldnames=fieldnames) writer.writeheader() for row in data: writer.writerow(row) def format_data(data: List[Dict]) -> List[Dict]: formatted_data = list() for item in data: formatted_item = format_keys(data=item) formatted_data.append(formatted_item) return formatted_data def format_keys(data: Dict) -> Dict: keys = {format_key(key=key): value for key, value in data.items()} return keys def format_key(key: str) -> str: key = " ".join([k.capitalize() for k in re.split("_| ", key)]) return key def show_results(results: List[Dict]): headers = list(results[0].keys()) summary = map(lambda x: list(map(lambda item: item[1], x.items())), results) print(tabulate(summary, headers=headers))
TensorFlow/Segmentation/UNet_3D_Medical/runtime
runtime
arguments
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Command line argument parsing """ import argparse PARSER = argparse.ArgumentParser(description="UNet-3D") # Estimator flags PARSER.add_argument('--model_dir', required=True, type=str) PARSER.add_argument('--exec_mode', choices=['train', 'evaluate', 'train_and_evaluate', 'predict', 'debug_train', 'debug_predict'], type=str) # Training flags PARSER.add_argument('--benchmark', dest='benchmark', action='store_true', default=False) PARSER.add_argument('--max_steps', default=16000, type=int) PARSER.add_argument('--learning_rate', default=0.0002, type=float) PARSER.add_argument('--log_every', default=100, type=int) PARSER.add_argument('--log_dir', type=str) PARSER.add_argument('--loss', choices=['dice', 'ce', 'dice+ce'], default='dice+ce', type=str) PARSER.add_argument('--warmup_steps', default=40, type=int) PARSER.add_argument('--normalization', choices=['instancenorm', 'batchnorm', 'groupnorm'], default='instancenorm', type=str) PARSER.add_argument('--include_background', dest='include_background', action='store_true', default=False) PARSER.add_argument('--resume_training', dest='resume_training', action='store_true', default=False) PARSER.add_argument('--seed', default=0, type=int) # Augmentations PARSER.add_argument('--augment', dest='augment', action='store_true', default=False) # Dataset flags PARSER.add_argument('--data_dir', required=True, type=str) PARSER.add_argument('--input_shape', nargs='+', type=int, default=[128, 128, 128]) PARSER.add_argument('--batch_size', default=1, type=int) PARSER.add_argument('--fold', default=0, type=int) PARSER.add_argument('--num_folds', default=5, type=int) # Tensorflow configuration flags PARSER.add_argument('--use_amp', '--amp', dest='use_amp', action='store_true', default=False) PARSER.add_argument('--use_xla', '--xla', dest='use_xla', action='store_true', default=False)
PaddlePaddle/LanguageModeling/BERT/utils
utils
cuda_bind
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import ctypes _cuda_home = os.environ.get('CUDA_HOME', '/usr/local/cuda') _cudart = ctypes.CDLL(os.path.join(_cuda_home, 'lib64/libcudart.so')) def cuda_profile_start(): _cudart.cudaProfilerStart() def cuda_profile_stop(): _cudart.cudaProfilerStop() _nvtx = ctypes.CDLL(os.path.join(_cuda_home, 'lib64/libnvToolsExt.so')) def cuda_nvtx_range_push(name): _nvtx.nvtxRangePushW(ctypes.c_wchar_p(name)) def cuda_nvtx_range_pop(): _nvtx.nvtxRangePop()
PyTorch/LanguageModeling/BERT/triton/large/scripts
scripts
setup_environment
#!/usr/bin/env bash # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. WORKDIR="${WORKDIR:=$(pwd)}" export DATASETS_DIR=${WORKDIR}/datasets export WORKSPACE_DIR=${WORKDIR}/runner_workspace export CHECKPOINTS_DIR=${WORKSPACE_DIR}/checkpoints export MODEL_REPOSITORY_PATH=${WORKSPACE_DIR}/model_store export SHARED_DIR=${WORKSPACE_DIR}/shared_dir echo "Preparing directories" mkdir -p ${WORKSPACE_DIR} mkdir -p ${DATASETS_DIR} mkdir -p ${CHECKPOINTS_DIR} mkdir -p ${MODEL_REPOSITORY_PATH} mkdir -p ${SHARED_DIR} echo "Setting up environment" export MODEL_NAME=BERT export ENSEMBLE_MODEL_NAME= export TRITON_LOAD_MODEL_METHOD=explicit export TRITON_INSTANCES=1
PyTorch/Classification/GPUNet/triton/08ms-D/runner
runner
config_NVIDIA-DGX-A100-(1x-A100-80GB)
batching: dynamic checkpoints: - name: 0.8ms-D url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_p1_pyt_ckpt/versions/21.12.0_amp/zip configurations: - checkpoint: 0.8ms-D parameters: backend_accelerator: trt checkpoint: 0.8ms-D device_kind: gpu export_format: onnx export_precision: fp16 format: onnx max_batch_size: 64 number_of_model_instances: 2 precision: fp16 tensorrt_capture_cuda_graph: 0 torch_jit: none container_version: '21.12' datasets: - name: imagenet datasets_dir: datasets ensemble_model_name: null framework: PyTorch measurement_steps_offline: 8 measurement_steps_online: 32 model_name: GPUnet performance_tool: model_analyzer triton_container_image: nvcr.io/nvidia/tritonserver:21.12-py3 triton_custom_operations: null triton_dockerfile: null triton_load_model_method: explicit
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/utils
utils
coco_metric
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """COCO-style evaluation metrics. Implements the interface of COCO API and metric_fn in tf.TPUEstimator. COCO API: github.com/cocodataset/cocoapi/ """ from __future__ import absolute_import, division, print_function import atexit import copy import logging import tempfile import cv2 import numpy as np import pycocotools.mask as maskUtils import tensorflow as tf from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval class MaskCOCO(COCO): """COCO object for mask evaluation. """ def reset(self, dataset): """Reset the dataset and groundtruth data index in this object. Args: dataset: dict of groundtruth data. It should has similar structure as the COCO groundtruth JSON file. Must contains three keys: {'images', 'annotations', 'categories'}. 'images': list of image information dictionary. Required keys: 'id', 'width' and 'height'. 'annotations': list of dict. Bounding boxes and segmentations related information. Required keys: {'id', 'image_id', 'category_id', 'bbox', 'iscrowd', 'area', 'segmentation'}. 'categories': list of dict of the category information. Required key: 'id'. Refer to http://cocodataset.org/#format-data for more details. Raises: AttributeError: If the dataset is empty or not a dict. """ assert dataset, 'Groundtruth should not be empty.' assert isinstance(dataset, dict), 'annotation file format {} not supported'.format( type(dataset)) self.anns, self.cats, self.imgs = dict(), dict(), dict() self.dataset = copy.deepcopy(dataset) self.createIndex() def loadRes(self, detection_results, include_mask, is_image_mask=False): """Load result file and return a result api object. Args: detection_results: a dictionary containing predictions results. include_mask: a boolean, whether to include mask in detection results. is_image_mask: a boolean, where the predict mask is a whole image mask. Returns: res: result MaskCOCO api object """ res = MaskCOCO() res.dataset['images'] = [img for img in self.dataset['images']] logging.info('Loading and preparing results...') predictions = self.load_predictions( detection_results, include_mask=include_mask, is_image_mask=is_image_mask) assert isinstance(predictions, list), 'results in not an array of objects' if predictions: image_ids = [pred['image_id'] for pred in predictions] assert set(image_ids) == (set(image_ids) & set(self.getImgIds())), \ 'Results do not correspond to current coco set' if (predictions and 'bbox' in predictions[0] and predictions[0]['bbox']): res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for idx, pred in enumerate(predictions): bb = pred['bbox'] x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]] if 'segmentation' not in pred: pred['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] pred['area'] = bb[2] * bb[3] pred['id'] = idx + 1 pred['iscrowd'] = 0 elif 'segmentation' in predictions[0]: res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) for idx, pred in enumerate(predictions): # now only support compressed RLE format as segmentation results pred['area'] = maskUtils.area(pred['segmentation']) if 'bbox' not in pred: pred['bbox'] = maskUtils.toBbox(pred['segmentation']) pred['id'] = idx + 1 pred['iscrowd'] = 0 res.dataset['annotations'] = predictions res.createIndex() return res def load_predictions(self, detection_results, include_mask, is_image_mask=False): """Create prediction dictionary list from detection and mask results. Args: detection_results: a dictionary containing numpy arrays which corresponds to prediction results. include_mask: a boolean, whether to include mask in detection results. is_image_mask: a boolean, where the predict mask is a whole image mask. Returns: a list of dictionary including different prediction results from the model in numpy form. """ predictions = [] num_detections = detection_results['detection_scores'].size current_index = 0 for i, image_id in enumerate(detection_results['source_ids']): if include_mask: box_coorindates_in_image = detection_results['detection_boxes'][i] segments = generate_segmentation_from_masks( detection_results['detection_masks'][i], box_coorindates_in_image, int(detection_results['image_info'][i][3]), int(detection_results['image_info'][i][4]), is_image_mask=is_image_mask ) # Convert the mask to uint8 and then to fortranarray for RLE encoder. encoded_masks = [ maskUtils.encode(np.asfortranarray(instance_mask.astype(np.uint8))) for instance_mask in segments ] for box_index in range(int(detection_results['num_detections'][i])): if current_index % 1000 == 0: logging.info('{}/{}'.format(current_index, num_detections)) current_index += 1 prediction = { 'image_id': int(image_id), 'bbox': detection_results['detection_boxes'][i][box_index].tolist(), 'score': detection_results['detection_scores'][i][box_index], 'category_id': int( detection_results['detection_classes'][i][box_index]), } if include_mask: prediction['segmentation'] = encoded_masks[box_index] predictions.append(prediction) return predictions def generate_segmentation_from_masks(masks, detected_boxes, image_height, image_width, is_image_mask=False): """Generates segmentation result from instance masks. Args: masks: a numpy array of shape [N, mask_height, mask_width] representing the instance masks w.r.t. the `detected_boxes`. detected_boxes: a numpy array of shape [N, 4] representing the reference bounding boxes. image_height: an integer representing the height of the image. image_width: an integer representing the width of the image. is_image_mask: bool. True: input masks are whole-image masks. False: input masks are bounding-box level masks. Returns: segms: a numpy array of shape [N, image_height, image_width] representing the instance masks *pasted* on the image canvas. """ def expand_boxes(boxes, scale): """Expands an array of boxes by a given scale.""" # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227 # The `boxes` in the reference implementation is in [x1, y1, x2, y2] form, # whereas `boxes` here is in [x1, y1, w, h] form w_half = boxes[:, 2] * .5 h_half = boxes[:, 3] * .5 x_c = boxes[:, 0] + w_half y_c = boxes[:, 1] + h_half w_half *= scale h_half *= scale boxes_exp = np.zeros(boxes.shape) boxes_exp[:, 0] = x_c - w_half boxes_exp[:, 2] = x_c + w_half boxes_exp[:, 1] = y_c - h_half boxes_exp[:, 3] = y_c + h_half return boxes_exp # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812 # To work around an issue with cv2.resize (it seems to automatically pad # with repeated border values), we manually zero-pad the masks by 1 pixel # prior to resizing back to the original image resolution. This prevents # "top hat" artifacts. We therefore need to expand the reference boxes by an # appropriate factor. _, mask_height, mask_width = masks.shape scale = max((mask_width + 2.0) / mask_width, (mask_height + 2.0) / mask_height) ref_boxes = expand_boxes(detected_boxes, scale) ref_boxes = ref_boxes.astype(np.int32) padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32) segms = [] for mask_ind, mask in enumerate(masks): im_mask = np.zeros((image_height, image_width), dtype=np.uint8) if is_image_mask: # Process whole-image masks. im_mask[:, :] = mask[:, :] else: # Process mask inside bounding boxes. padded_mask[1:-1, 1:-1] = mask[:, :] ref_box = ref_boxes[mask_ind, :] w = ref_box[2] - ref_box[0] + 1 h = ref_box[3] - ref_box[1] + 1 w = np.maximum(w, 1) h = np.maximum(h, 1) mask = cv2.resize(padded_mask, (w, h)) mask = np.array(mask > 0.5, dtype=np.uint8) x_0 = max(ref_box[0], 0) x_1 = min(ref_box[2] + 1, image_width) y_0 = max(ref_box[1], 0) y_1 = min(ref_box[3] + 1, image_height) im_mask[y_0:y_1, x_0:x_1] = mask[(y_0 - ref_box[1]):(y_1 - ref_box[1]), ( x_0 - ref_box[ 0]):(x_1 - ref_box[ 0])] segms.append(im_mask) segms = np.array(segms) assert masks.shape[0] == segms.shape[0] return segms class EvaluationMetric: """COCO evaluation metric class.""" def __init__(self, filename, include_mask): """Constructs COCO evaluation class. The class provides the interface to metrics_fn in TPUEstimator. The _evaluate() loads a JSON file in COCO annotation format as the groundtruths and runs COCO evaluation. Args: filename: Ground truth JSON file name. If filename is None, use groundtruth data passed from the dataloader for evaluation. include_mask: boolean to indicate whether or not to include mask eval. """ if filename: if filename.startswith('gs://'): _, local_val_json = tempfile.mkstemp(suffix='.json') tf.io.gfile.remove(local_val_json) tf.io.gfile.copy(filename, local_val_json) atexit.register(tf.io.gfile.remove, local_val_json) else: local_val_json = filename self.coco_gt = MaskCOCO(local_val_json) self.filename = filename self.metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1', 'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl'] self._include_mask = include_mask if self._include_mask: mask_metric_names = ['mask_' + x for x in self.metric_names] self.metric_names.extend(mask_metric_names) self._reset() def _reset(self): """Reset COCO API object.""" if self.filename is None and not hasattr(self, 'coco_gt'): self.coco_gt = MaskCOCO() def predict_metric_fn(self, predictions, is_predict_image_mask=False, groundtruth_data=None): """Generates COCO metrics.""" image_ids = list(set(predictions['source_ids'])) if groundtruth_data is not None: self.coco_gt.reset(groundtruth_data) coco_dt = self.coco_gt.loadRes( predictions, self._include_mask, is_image_mask=is_predict_image_mask) coco_eval = COCOeval(self.coco_gt, coco_dt, iouType='bbox') coco_eval.params.imgIds = image_ids coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() coco_metrics = coco_eval.stats if self._include_mask: # Create another object for instance segmentation metric evaluation. mcoco_eval = COCOeval(self.coco_gt, coco_dt, iouType='segm') mcoco_eval.params.imgIds = image_ids mcoco_eval.evaluate() mcoco_eval.accumulate() mcoco_eval.summarize() mask_coco_metrics = mcoco_eval.stats if self._include_mask: metrics = np.hstack((coco_metrics, mask_coco_metrics)) else: metrics = coco_metrics # clean up after evaluation is done. self._reset() metrics = metrics.astype(np.float32) metrics_dict = {} for i, name in enumerate(self.metric_names): metrics_dict[name] = metrics[i] return metrics_dict
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner
triton_performance_runner
runner
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # method from PEP-366 to support relative import in executed modules import logging import pathlib from typing import List, Optional, Dict, Tuple if __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ..core import EvaluationMode, MeasurementMode, OfflineMode from .perf_analyzer import PerfAnalyzerRunner, PerfAnalyzerWarmupRunner LOGGER = logging.getLogger("triton_performance_runner") class TritonPerformanceRunner: def __init__( self, server_url: str, model_name: str, input_data: Dict[int, Tuple], batch_sizes: List[int], concurrency: List[int], measurement_mode: MeasurementMode, measurement_interval: int, measurement_request_count: int, evaluation_mode: EvaluationMode, offline_mode: OfflineMode, output_shared_memory_size: int, result_path: pathlib.Path, warmup: bool, timeout: Optional[int], verbose: bool, flattened_input: bool, ): self._warmup_runner = None if warmup: LOGGER.info("Running warmup before the main test") self._warmup_runner = PerfAnalyzerWarmupRunner( server_url=server_url, model_name=model_name, input_data=input_data, batch_sizes=batch_sizes, concurrency=concurrency, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, timeout=timeout, flattened_input=flattened_input ) LOGGER.info("Using Perf Analyzer for performance evaluation") self._runner = PerfAnalyzerRunner( server_url=server_url, model_name=model_name, input_data=input_data, batch_sizes=batch_sizes, measurement_mode=measurement_mode, measurement_interval=measurement_interval, measurement_request_count=measurement_request_count, concurrency=concurrency, evaluation_mode=evaluation_mode, offline_mode=offline_mode, output_shared_memory_size=output_shared_memory_size, result_path=result_path, timeout=timeout, verbose=verbose, flattened_input=flattened_input ) def run(self): if self._warmup_runner: self._warmup_runner.run() self._runner.run()
PyTorch/Forecasting/TFT/triton/deployment_toolkit/perf_analyzer
perf_analyzer
perf_config
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict from .exceptions import PerfAnalyzerException class PerfAnalyzerConfig: """ A config class to set arguments to the perf_analyzer. An argument set to None will use the perf_analyzer's default. """ perf_analyzer_args = [ "async", "sync", "measurement-interval", "measurement-mode", "measurement-request-count", "concurrency-range", "request-rate-range", "request-distribution", "request-intervals", "binary-search", "num-of-sequence", "latency-threshold", "max-threads", "stability-percentage", "max-trials", "percentile", "input-data", "shared-memory", "output-shared-memory-size", "sequence-length", "string-length", "string-data", ] perf_analyzer_multiple_args = [ "shape", ] input_to_options = [ "model-name", "model-version", "batch-size", "url", "protocol", "latency-report-file", "streaming", ] input_to_verbose = ["verbose", "extra-verbose"] def __init__(self): """ Construct a PerfAnalyzerConfig """ self._args = {k: None for k in self.perf_analyzer_args} self._multiple_args = {k: [] for k in self.perf_analyzer_multiple_args} self._options = { "-m": None, "-x": None, "-b": None, "-u": None, "-i": None, "-f": None, "-H": None, "-c": None, "-t": None, } self._verbose = {"-v": None, "-v -v": None} self._input_to_options = { "model-name": "-m", "model-version": "-x", "batch-size": "-b", "url": "-u", "protocol": "-i", "latency-report-file": "-f", "streaming": "-H", "concurrency": "-c", "threads": "-t", } self._input_to_verbose = {"verbose": "-v", "extra-verbose": "-v -v"} @classmethod def allowed_keys(cls): """ Returns ------- list of str The keys that are allowed to be passed into perf_analyzer """ return ( list(cls.perf_analyzer_args) + list(cls.perf_analyzer_multiple_args) + list(cls.input_to_options) + list(cls.input_to_verbose) ) def update_config(self, params=None): """ Allows setting values from a params dict Parameters ---------- params: dict keys are allowed args to perf_analyzer """ if params: for key in params: self[key] = params[key] def to_cli_string(self): """ Utility function to convert a config into a string of arguments to the perf_analyzer with CLI. Returns ------- str cli command string consisting of all arguments to the perf_analyzer set in the config, without the executable name. """ # single dashed options, then verbose flags, then main args args = [f"{k} {v}" for k, v in self._options.items() if v] args += [k for k, v in self._verbose.items() if v] args += [f"--{k}={v}" for k, v in self._args.items() if v] for k, v in self._multiple_args.items(): for item in v: args.append(f"--{k}={item}") return " ".join(args) def __getitem__(self, key: str): """ Gets an arguments value in config Parameters ---------- key : str The name of the argument to the perf_analyzer Returns ------- The value that the argument is set to in this config Raises ------ TritonModelAnalyzerException If argument not found in the config """ if key in self._args: return self._args[key] elif key in self._multiple_args: return self._multiple_args[key] elif key in self._input_to_options: return self._options[self._input_to_options[key]] elif key in self._input_to_verbose: return self._verbose[self._input_to_verbose[key]] else: raise PerfAnalyzerException(f"'{key}' Key not found in config") def __setitem__(self, key: str, value: Any): """ Sets an arguments value in config after checking if defined/supported. Parameters ---------- key : str The name of the argument to the perf_analyzer value : (any) The value to which the argument is being set Raises ------ TritonModelAnalyzerException If key is unsupported or undefined in the config class """ if key in self._args: self._args[key] = value elif key in self._multiple_args: self._multiple_args[key].append(value) elif key in self._input_to_options: self._options[self._input_to_options[key]] = value elif key in self._input_to_verbose: self._verbose[self._input_to_verbose[key]] = value else: raise PerfAnalyzerException( f"The argument '{key}' to the perf_analyzer " "is not supported by the model analyzer." )
PyTorch/Segmentation/MaskRCNN/pytorch/configs
configs
e2e_mask_rcnn_X_101_32x8d_FPN_1x
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://ImageNetPretrained/FAIR/20171220/X-101-32x8d" BACKBONE: CONV_BODY: "R-101-FPN" OUT_CHANNELS: 256 RPN: USE_FPN: True ANCHOR_STRIDE: (4, 8, 16, 32, 64) PRE_NMS_TOP_N_TRAIN: 2000 PRE_NMS_TOP_N_TEST: 1000 POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TEST: 1000 ROI_HEADS: USE_FPN: True ROI_BOX_HEAD: POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" PREDICTOR: "FPNPredictor" ROI_MASK_HEAD: POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" PREDICTOR: "MaskRCNNC4Predictor" POOLER_RESOLUTION: 14 POOLER_SAMPLING_RATIO: 2 RESOLUTION: 28 SHARE_BOX_FEATURE_EXTRACTOR: False RESNETS: STRIDE_IN_1X1: False NUM_GROUPS: 32 WIDTH_PER_GROUP: 8 MASK_ON: True DATASETS: TRAIN: ("coco_2014_train", "coco_2014_valminusminival") TEST: ("coco_2014_minival",) DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: BASE_LR: 0.01 WEIGHT_DECAY: 0.0001 STEPS: (120000, 160000) MAX_ITER: 180000 IMS_PER_BATCH: 8
PyTorch/LanguageModeling/BERT/triton/dist6l/runner
runner
config_NVIDIA-T4
checkpoints: - name: dist-6l-qa url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/bert_pyt_ckpt_distilled_6l_768d_qa_squad11_amp/versions/20.12.0/zip configurations: - accelerator: none accelerator_precision: fp16 batch_size: - 1 batch_sizes: '1' capture_cuda_graph: 0 checkpoint_variant: dist-6l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 1 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: '1' - accelerator: none accelerator_precision: fp16 batch_size: - 16 batch_sizes: '16' capture_cuda_graph: 0 checkpoint_variant: dist-6l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 - accelerator: none accelerator_precision: fp16 batch_size: - 8 batch_sizes: '8' capture_cuda_graph: 0 checkpoint_variant: dist-6l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 8 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 4 8 - accelerator: trt accelerator_precision: fp16 batch_size: - 1 batch_sizes: '1' capture_cuda_graph: 0 checkpoint_variant: dist-6l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 1 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: '1' - accelerator: trt accelerator_precision: fp16 batch_size: - 16 batch_sizes: '16' capture_cuda_graph: 0 checkpoint_variant: dist-6l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 - accelerator: trt accelerator_precision: fp16 batch_size: - 8 batch_sizes: '8' capture_cuda_graph: 0 checkpoint_variant: dist-6l-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 8 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 4 8 - accelerator: none accelerator_precision: fp16 batch_size: - 1 batch_sizes: '1' capture_cuda_graph: 0 checkpoint_variant: dist-6l-qa export_format: onnx export_precision: fp16 format: trt max_batch_size: 1 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: '1' - accelerator: none accelerator_precision: fp16 batch_size: - 16 batch_sizes: '16' capture_cuda_graph: 0 checkpoint_variant: dist-6l-qa export_format: onnx export_precision: fp16 format: trt max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 - accelerator: none accelerator_precision: fp16 batch_size: - 8 batch_sizes: '8' capture_cuda_graph: 0 checkpoint_variant: dist-6l-qa export_format: onnx export_precision: fp16 format: trt max_batch_size: 8 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 4 8 - accelerator: none accelerator_precision: fp16 batch_size: - 1 - 8 - 16 batch_sizes: 1 8 16 capture_cuda_graph: 0 checkpoint_variant: dist-6l-qa export_format: ts-trace export_precision: fp16 format: ts-trace max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 container_version: '21.10' datasets: - name: data datasets_dir: datasets framework: PyTorch model_name: BERT triton_container_image: null triton_custom_operations: null triton_dockerfile: null triton_load_model_method: explicit
TensorFlow/Detection/SSD/models/research/object_detection/models
models
ssd_mobilenet_v1_fpn_feature_extractor
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SSD MobilenetV1 FPN Feature Extractor.""" import copy import functools import tensorflow as tf from object_detection.meta_architectures import ssd_meta_arch from object_detection.models import feature_map_generators from object_detection.utils import context_manager from object_detection.utils import ops from object_detection.utils import shape_utils from nets import mobilenet_v1 slim = tf.contrib.slim # A modified config of mobilenet v1 that makes it more detection friendly, def _create_modified_mobilenet_config(): conv_defs = copy.deepcopy(mobilenet_v1.MOBILENETV1_CONV_DEFS) conv_defs[-2] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=512) conv_defs[-1] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=256) return conv_defs class SSDMobileNetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor): """SSD Feature Extractor using MobilenetV1 FPN features.""" def __init__(self, is_training, depth_multiplier, min_depth, pad_to_multiple, conv_hyperparams_fn, fpn_min_level=3, fpn_max_level=7, additional_layer_depth=256, reuse_weights=None, use_explicit_padding=False, use_depthwise=False, override_base_feature_extractor_hyperparams=False): """SSD FPN feature extractor based on Mobilenet v1 architecture. Args: is_training: whether the network is in training mode. depth_multiplier: float depth multiplier for feature extractor. min_depth: minimum feature extractor depth. pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d and separable_conv2d ops in the layers that are added on top of the base feature extractor. fpn_min_level: the highest resolution feature map to use in FPN. The valid values are {2, 3, 4, 5} which map to MobileNet v1 layers {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise, Conv2d_13_pointwise}, respectively. fpn_max_level: the smallest resolution feature map to construct or use in FPN. FPN constructions uses features maps starting from fpn_min_level upto the fpn_max_level. In the case that there are not enough feature maps in the backbone network, additional feature maps are created by applying stride 2 convolutions until we get the desired number of fpn levels. additional_layer_depth: additional feature map layer channel depth. reuse_weights: whether to reuse variables. Default is None. use_explicit_padding: Whether to use explicit padding when extracting features. Default is False. use_depthwise: Whether to use depthwise convolutions. Default is False. override_base_feature_extractor_hyperparams: Whether to override hyperparameters of the base feature extractor with the one from `conv_hyperparams_fn`. """ super(SSDMobileNetV1FpnFeatureExtractor, self).__init__( is_training=is_training, depth_multiplier=depth_multiplier, min_depth=min_depth, pad_to_multiple=pad_to_multiple, conv_hyperparams_fn=conv_hyperparams_fn, reuse_weights=reuse_weights, use_explicit_padding=use_explicit_padding, use_depthwise=use_depthwise, override_base_feature_extractor_hyperparams= override_base_feature_extractor_hyperparams) self._fpn_min_level = fpn_min_level self._fpn_max_level = fpn_max_level self._additional_layer_depth = additional_layer_depth self._conv_defs = None if self._use_depthwise: self._conv_defs = _create_modified_mobilenet_config() def preprocess(self, resized_inputs): """SSD preprocessing. Maps pixel values to the range [-1, 1]. Args: resized_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. """ return (2.0 / 255.0) * resized_inputs - 1.0 def extract_features(self, preprocessed_inputs): """Extract features from preprocessed inputs. Args: preprocessed_inputs: a [batch, height, width, channels] float tensor representing a batch of images. Returns: feature_maps: a list of tensors where the ith tensor has shape [batch, height_i, width_i, depth_i] """ preprocessed_inputs = shape_utils.check_min_image_dim( 33, preprocessed_inputs) with tf.variable_scope('MobilenetV1', reuse=self._reuse_weights) as scope: with slim.arg_scope( mobilenet_v1.mobilenet_v1_arg_scope( is_training=None, regularize_depthwise=True)): with (slim.arg_scope(self._conv_hyperparams_fn()) if self._override_base_feature_extractor_hyperparams else context_manager.IdentityContextManager()): _, image_features = mobilenet_v1.mobilenet_v1_base( ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple), final_endpoint='Conv2d_13_pointwise', min_depth=self._min_depth, depth_multiplier=self._depth_multiplier, conv_defs=self._conv_defs, use_explicit_padding=self._use_explicit_padding, scope=scope) depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth) with slim.arg_scope(self._conv_hyperparams_fn()): with tf.variable_scope('fpn', reuse=self._reuse_weights): feature_blocks = [ 'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise', 'Conv2d_13_pointwise' ] base_fpn_max_level = min(self._fpn_max_level, 5) feature_block_list = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_block_list.append(feature_blocks[level - 2]) fpn_features = feature_map_generators.fpn_top_down_feature_maps( [(key, image_features[key]) for key in feature_block_list], depth=depth_fn(self._additional_layer_depth), use_depthwise=self._use_depthwise, use_explicit_padding=self._use_explicit_padding) feature_maps = [] for level in range(self._fpn_min_level, base_fpn_max_level + 1): feature_maps.append(fpn_features['top_down_{}'.format( feature_blocks[level - 2])]) last_feature_map = fpn_features['top_down_{}'.format( feature_blocks[base_fpn_max_level - 2])] # Construct coarse features padding = 'VALID' if self._use_explicit_padding else 'SAME' kernel_size = 3 for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1): if self._use_depthwise: conv_op = functools.partial( slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d if self._use_explicit_padding: last_feature_map = ops.fixed_padding( last_feature_map, kernel_size) last_feature_map = conv_op( last_feature_map, num_outputs=depth_fn(self._additional_layer_depth), kernel_size=[kernel_size, kernel_size], stride=2, padding=padding, scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13)) feature_maps.append(last_feature_map) return feature_maps
PyTorch/SpeechSynthesis/HiFiGAN/scripts/docker
docker
build
#!/usr/bin/env bash docker build . -t hifigan:latest
TensorFlow2/LanguageModeling/ELECTRA/data
data
Downloader
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from GooglePretrainedWeightDownloader import GooglePretrainedWeightDownloader from NVIDIAPretrainedWeightDownloader import NVIDIAPretrainedWeightDownloader from WikiDownloader import WikiDownloader from BooksDownloader import BooksDownloader from MRPCDownloader import MRPCDownloader from SquadDownloader import SquadDownloader class Downloader: def __init__(self, dataset_name, save_path): self.dataset_name = dataset_name self.save_path = save_path def download(self): if self.dataset_name == 'bookscorpus': self.download_bookscorpus() elif self.dataset_name == 'wikicorpus_en': self.download_wikicorpus('en') elif self.dataset_name == 'wikicorpus_zh': self.download_wikicorpus('zh') elif self.dataset_name == 'google_pretrained_weights': self.download_google_pretrained_weights() elif self.dataset_name == 'nvidia_pretrained_weights': self.download_nvidia_pretrained_weights() elif self.dataset_name == 'mrpc': self.download_mrpc() elif self.dataset_name == 'squad': self.download_squad() elif self.dataset_name == 'all': self.download_bookscorpus(self.save_path) self.download_wikicorpus('en', self.save_path) self.download_wikicorpus('zh', self.save_path) self.download_google_pretrained_weights(self.save_path) self.download_nvidia_pretrained_weights(self.save_path) self.download_mrpc(self.save_path) self.download_squad(self.save_path) else: print(self.dataset_name) assert False, 'Unknown dataset_name provided to downloader' def download_bookscorpus(self): downloader = BooksDownloader(self.save_path) downloader.download() def download_wikicorpus(self, language): downloader = WikiDownloader(language, self.save_path) downloader.download() def download_google_pretrained_weights(self): downloader = GooglePretrainedWeightDownloader(self.save_path) downloader.download() def download_nvidia_pretrained_weights(self): downloader = NVIDIAPretrainedWeightDownloader(self.save_path) downloader.download() def download_mrpc(self): downloader = MRPCDownloader(self.save_path) downloader.download() def download_squad(self): downloader = SquadDownloader(self.save_path) downloader.download()
PyTorch/Forecasting/TFT/triton/runner
runner
runner_proxy
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from typing import List, Type # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .config import Config from .executor import Executor from .finalizer import Finalizer from .maintainer import Maintainer from .pipeline import Pipeline from .preparer import Preparer from .runner import Runner class RunnerProxy: """ Runner proxy to configure original runner """ maintainer_cls: Type[Maintainer] = None executor_cls: Type[Executor] = None preparer_cls: Type[Preparer] = None finalizer_cls: Type[Finalizer] = None def __init__(self, config: Config, pipeline: Pipeline, devices: List[str]): """ RunnerProxy constructor Args: config: Config object pipeline: Pipeline to evaluate devices: List of devices to use for tests """ self._runner = Runner( config=config, pipeline=pipeline, devices=devices, maintainer_cls=self.maintainer_cls, executor_cls=self.executor_cls, preparer_cls=self.preparer_cls, finalizer_cls=self.finalizer_cls, ) def start(self) -> None: """ Runner interface """ self._runner.start()
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner
runner
configuration
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from typing import Any, Dict, List, Union # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .task import DataObject class Configuration(DataObject): """ Configuration object - handle single experiment data """ def __init__( self, precision: str, format: str, batch_size: Union[str, List], accelerator: str, triton_gpu_engine_count: int, triton_max_queue_delay: int, capture_cuda_graph: int, checkpoint_variant: str, triton_preferred_batch_sizes: Union[str, List], **kwargs: Any, ): """ Args: precision: Target model precision format: Target conversion format batch_size: Batch sizes to evaluate accelerator: Triton Backend Accelerator triton_gpu_engine_count: Number of model instances triton_max_queue_delay: Maximal queue delay capture_cuda_graph: Triton Capture CUDA Graph optimization for tensorrt checkpoint_variant: Checkpoint used for configuration triton_preferred_batch_sizes: Preferred batch sizes **kwargs: Additional model arguments """ if isinstance(batch_size, str): batch_size = map(lambda item: int(item), batch_size.split(",")) if isinstance(triton_preferred_batch_sizes, str): triton_preferred_batch_sizes = map(lambda item: int(item), triton_preferred_batch_sizes.split(" ")) self.precision = precision self.format = format self.batch_size = sorted(batch_size) self.accelerator = accelerator self.triton_gpu_engine_count = triton_gpu_engine_count self.triton_max_queue_delay = triton_max_queue_delay self.capture_cuda_graph = capture_cuda_graph self.max_batch_size = max(self.batch_size) self.checkpoint_variant = checkpoint_variant self.triton_preferred_batch_sizes = " ".join(map(lambda i: str(i), sorted(triton_preferred_batch_sizes))) for key, value in kwargs.items(): self.__setattr__(key, value) @property def parameters(self) -> Dict: """ Return values stored in configuration Returns: Dictionary with configuration parameters """ return self.__dict__
PyTorch/SpeechRecognition/QuartzNet/common/text
text
__init__
# Copyright (c) 2017 Keith Ito """ from https://github.com/keithito/tacotron """ import re import string from . import cleaners def _clean_text(text, cleaner_names, *args): for name in cleaner_names: cleaner = getattr(cleaners, name) if not cleaner: raise Exception('Unknown cleaner: %s' % name) text = cleaner(text, *args) return text def punctuation_map(labels): # Punctuation to remove punctuation = string.punctuation punctuation = punctuation.replace("+", "") punctuation = punctuation.replace("&", "") # TODO We might also want to consider: # @ -> at # # -> number, pound, hashtag # ~ -> tilde # _ -> underscore # % -> percent # If a punctuation symbol is inside our vocab, we do not remove from text for l in labels: punctuation = punctuation.replace(l, "") # Turn all punctuation to whitespace table = str.maketrans(punctuation, " " * len(punctuation)) return table
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2
tacotron2
tacotron2Instance
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "tacotron2Instance.h" #include "dataShuffler.h" #include "NvInfer.h" #include "cuda_runtime.h" #include <cstdint> #include <iostream> #include <stdexcept> #include <string> #include <vector> using namespace nvinfer1; namespace tts { /****************************************************************************** * CONSTRUCTORS / DESTRUCTOR ************************************************** *****************************************************************************/ Tacotron2Instance::Tacotron2Instance( TRTPtr<ICudaEngine> encoderEngine, TRTPtr<ICudaEngine> decoderPlainEngine, TRTPtr<ICudaEngine> decoderPluginsEngine, TRTPtr<ICudaEngine> postnetEngine) : TimedObject("Tacotron2Instance::infer()"), mStreamingInstance( std::move(encoderEngine), std::move(decoderPlainEngine), std::move(decoderPluginsEngine), std::move(postnetEngine)), mChunkSize(mStreamingInstance.getMaxBatchSize()), mNumMelChunks(mStreamingInstance.getChunkSize()), mEarlyExit(true), mOutputShuffledDevice(getRequiredOutputSize( getMaxBatchSize(), getMaximumInputLength() * 10 + 100)) { // setup timing addChild(&mStreamingInstance); } /****************************************************************************** * PUBLIC METHODS ************************************************************* *****************************************************************************/ void Tacotron2Instance::infer(const int batchSize, const int* const inputDevice, const int inputSpacing, const int* const inputLength, const int maxOutputLength, float* const outputDevice, int* const outputLength) { startTiming(); cudaStream_t stream; cudaStreamCreate(&stream); mStreamingInstance.startInference( batchSize, inputDevice, inputSpacing, inputLength, stream); // do decoding float* intermediateOutput; if (batchSize > 1) { if (static_cast<size_t>(getRequiredOutputSize(batchSize, maxOutputLength)) > mOutputShuffledDevice.size()) { mOutputShuffledDevice = CudaMemory<float>( getRequiredOutputSize(batchSize, maxOutputLength)); } intermediateOutput = mOutputShuffledDevice.data(); } else { intermediateOutput = outputDevice; } std::fill(outputLength, outputLength + batchSize, 0); const int numBlocks = ((maxOutputLength - 1) / mNumMelChunks) + 1; bool moreToDo = false; for (int block = 0; block < numBlocks; ++block) { const int numFramesTotal = block * mNumMelChunks; const int offset = block * batchSize * mStreamingInstance.getChunkSize() * mStreamingInstance.getNumMelChannels(); if (numFramesTotal + mNumMelChunks > maxOutputLength) { mStreamingInstance.setNextChunkSize(maxOutputLength - numFramesTotal); } moreToDo = mStreamingInstance.inferNext( intermediateOutput + offset, mChunkSize.data(), stream); for (int batchIndex = 0; batchIndex < batchSize; ++batchIndex) { if (mEarlyExit) { outputLength[batchIndex] += mChunkSize[batchIndex]; } else { outputLength[batchIndex] += mStreamingInstance.getChunkSize(); } } if (mEarlyExit && !moreToDo) { break; } } if (mEarlyExit && moreToDo) { std::cerr << "One or more sequences failed to finish." << std::endl; } if (batchSize > 1) { // take the output from TNC to NTC // re-shuffle final output DataShuffler::shuffleMels( mOutputShuffledDevice.data(), outputDevice, batchSize, mStreamingInstance.getNumMelChannels(), mStreamingInstance.getChunkSize(), numBlocks, maxOutputLength, stream); } CudaUtils::sync(stream); cudaStreamDestroy(stream); stopTiming(); } void Tacotron2Instance::setEarlyExit(const bool earlyExit) { mEarlyExit = earlyExit; } void Tacotron2Instance::setSeed(const unsigned int seed) { mStreamingInstance.setSeed(seed); } int Tacotron2Instance::getNumMelChannels() const { return mStreamingInstance.getNumMelChannels(); } int Tacotron2Instance::getMaximumInputLength() const { return mStreamingInstance.getMaximumInputLength(); } int Tacotron2Instance::getMaxBatchSize() const { return mStreamingInstance.getMaxBatchSize(); } int Tacotron2Instance::getRequiredOutputSize(const int batchSize, const int maxFrames) const { if (batchSize > getMaxBatchSize()) { throw std::runtime_error("Maximum batch size is " + std::to_string(getMaxBatchSize()) + " but got " + std::to_string(batchSize) + "."); } const int numMelChunks = mStreamingInstance.getChunkSize(); const int frameCeil = (((maxFrames - 1) / mNumMelChunks) + 1) * numMelChunks; return batchSize * frameCeil * getNumMelChannels(); } void Tacotron2Instance::usePlugins(const bool usePlugins) { mStreamingInstance.usePlugins(usePlugins); } bool Tacotron2Instance::willUsePlugins(const int batchSize) const { return mStreamingInstance.willUsePlugins(batchSize); } } // namespace tts
PyTorch/SpeechRecognition/wav2vec2/scripts/docker
docker
run
#!/usr/bin/env bash # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. SCRIPT_DIR=$(cd $(dirname $0); pwd) : ${DATASET_DIR:=$SCRIPT_DIR/../../datasets} set -eux docker run -it --rm \ --gpus all \ --env PYTHONDONTWRITEBYTECODE=1 \ --ipc=host \ -v "$DATASET_DIR:/datasets" \ -v "$SCRIPT_DIR/../..:/workspace/wav2vec2" \ wav2vec2:latest bash
TensorFlow2/Classification/ConvNets/dataloader
dataloader
dali_index
#!/bin/bash SRC_DIR=${1} DST_DIR=${2} echo "Creating training file indexes" mkdir -p ${DST_DIR} for file in ${SRC_DIR}/train-*; do BASENAME=$(basename $file) DST_NAME=$DST_DIR/$BASENAME echo "Creating index $DST_NAME for $file" tfrecord2idx $file $DST_NAME done echo "Creating validation file indexes" for file in ${SRC_DIR}/validation-*; do BASENAME=$(basename $file) DST_NAME=$DST_DIR/$BASENAME echo "Creating index $DST_NAME for $file" tfrecord2idx $file $DST_NAME done
PyTorch/Segmentation/MaskRCNN/pytorch/configs/quick_schedules
quick_schedules
e2e_mask_rcnn_R_50_C4_quick
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" RPN: PRE_NMS_TOP_N_TEST: 6000 POST_NMS_TOP_N_TEST: 1000 ROI_HEADS: BATCH_SIZE_PER_IMAGE: 256 ROI_MASK_HEAD: PREDICTOR: "MaskRCNNC4Predictor" SHARE_BOX_FEATURE_EXTRACTOR: True MASK_ON: True DATASETS: TRAIN: ("coco_2014_minival",) TEST: ("coco_2014_minival",) INPUT: MIN_SIZE_TRAIN: 600 MAX_SIZE_TRAIN: 1000 MIN_SIZE_TEST: 800 MAX_SIZE_TEST: 1000 SOLVER: BASE_LR: 0.005 WEIGHT_DECAY: 0.0001 STEPS: (1500,) MAX_ITER: 2000 IMS_PER_BATCH: 4 TEST: IMS_PER_BATCH: 2
PyTorch/Translation/Transformer
Transformer
train
#!/usr/bin/env python3 -u # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. # #------------------------------------------------------------------------- # # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import os import math import time import ctypes from copy import deepcopy import torch import sacrebleu import dllogger as DLLogger from fairseq import data, distributed_utils, options, utils, tokenizer from fairseq.ddp_trainer import DDPTrainer from fairseq.meters import StopwatchMeter from fairseq.sequence_generator import SequenceGenerator from fairseq.data import data_utils, load_dataset_splits from fairseq.models import build_model from fairseq.log_helper import setup_logger, reset_perf_meters def main(args): print(args) setup_logger(args) if not torch.cuda.is_available(): raise NotImplementedError('Training on CPU is not supported') torch.cuda.set_device(args.local_rank) if args.distributed_world_size > 1: assert torch.distributed.is_initialized() torch.distributed.broadcast(torch.tensor([1], device="cuda"), 0) torch.cuda.synchronize() pValue = ctypes.cast((ctypes.c_int * 1)(), ctypes.POINTER(ctypes.c_int)) ctypes.CDLL('libcudart.so').cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128)) ctypes.CDLL('libcudart.so').cudaDeviceGetLimit(pValue, ctypes.c_int(0x05)) torch.manual_seed(args.seed) src_dict, tgt_dict = data_utils.load_dictionaries(args) add_extra_items_to_checkpoint({'src_dict': src_dict, 'tgt_dict': tgt_dict}) datasets = load_dataset_splits(args, ['train', 'valid', 'test'], src_dict, tgt_dict) model = build_model(args) print('| num. model params: {}'.format(sum(p.numel() for p in model.parameters()))) # Build trainer if torch.cuda.get_device_capability(0)[0] >= 7 and not args.amp: print('| NOTICE: your device may support faster training with --amp') trainer = DDPTrainer(args, model) print('| model {}, criterion {}'.format(args.arch, trainer.criterion.__class__.__name__)) print('| training on {} GPUs'.format(args.distributed_world_size)) print('| max tokens per GPU = {} and max sentences per GPU = {}'.format( args.max_tokens, args.max_sentences, )) epoch_itr = data.EpochBatchIterator( dataset=datasets[args.train_subset], max_tokens=args.max_tokens, max_sentences=args.max_sentences_valid, max_positions=args.max_positions, required_batch_size_multiple=8, seed=args.seed, num_shards=args.distributed_world_size, shard_id=args.distributed_rank, ) # Load the latest checkpoint if one is available load_checkpoint(args, trainer, epoch_itr) # Send a dummy batch to warm the caching allocator dummy_batch = data_utils.get_dummy_batch(args.max_tokens, src_dict, tgt_dict) trainer.dummy_train_step(dummy_batch) # Sanity check if args.do_sanity_check: print('Performing sanity check...') sanity_score = score(args, trainer, datasets['test'], src_dict, tgt_dict, 'test.raw.de') DLLogger.log(step='SANITY_CHECK', data={'sanity_check_score': sanity_score}, verbosity=1) # Train until the learning rate gets too small or model reaches target score max_epoch = args.max_epoch or math.inf max_update = args.max_update or math.inf tgt_bleu = args.target_bleu or math.inf current_bleu = 0.0 best_bleu = -1.0 lr = trainer.get_lr() train_meter = StopwatchMeter() train_meter.start() valid_losses = [None] valid_subsets = args.valid_subset.split(',') run_summary = {'loss': float('inf'), 'val_loss': float('inf'), 'speed': 0, 'accuracy': 0} while lr >= args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update and current_bleu < tgt_bleu: DLLogger.log(step=trainer.get_num_updates()+1, data={'epoch': epoch_itr.epoch}, verbosity=0) # train for one epoch train(args, trainer, epoch_itr) DLLogger.log(step=trainer.get_num_updates(), data={'walltime': train_meter.sum}, verbosity=1) DLLogger.log(step=trainer.get_num_updates(), data={'avg_epoch_loss': trainer.avg_loss_meter.avg}, verbosity=1) if epoch_itr.epoch % args.validate_interval == 0: valid_losses = validate(args, trainer, datasets, valid_subsets) valid_bleu = score(args, trainer, datasets[valid_subsets[0]], src_dict, tgt_dict, 'valid.raw.de') DLLogger.log(step=trainer.get_num_updates(), data={'val_loss': valid_losses[0], 'val_bleu': valid_bleu}, verbosity=1) # Eval BLEU score if args.online_eval or (tgt_bleu is not math.inf): current_bleu = score(args, trainer, datasets[args.gen_subset], src_dict, tgt_dict, 'test.raw.de') DLLogger.log(step=trainer.get_num_updates(), data={'test_bleu': current_bleu}, verbosity=1) best_bleu = max(best_bleu, current_bleu) run_summary['val_loss'] = min(run_summary['val_loss'], valid_losses[0]) run_summary['accuracy'] = best_bleu if best_bleu >= 0 else valid_bleu run_summary['loss'] = valid_losses[0] run_summary['speed'] = trainer.throughput_meter.u_avg # Only use first validation loss to update the learning rate lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) save_checkpoint(args, trainer, epoch_itr, valid_losses[0]) train_meter.stop() run_summary['walltime'] = train_meter.sum DLLogger.log(step=(), data=run_summary, verbosity=0) print('| done training in {:.1f} seconds'.format(train_meter.sum)) def train(args, trainer, epoch_itr): """Train the model for one epoch.""" # Initialize data iterator itr = epoch_itr.next_epoch_itr() # update parameters every N batches if epoch_itr.epoch <= len(args.update_freq): update_freq = args.update_freq[epoch_itr.epoch - 1] else: update_freq = args.update_freq[-1] max_update = args.max_update or math.inf num_batches = len(epoch_itr) torch.cuda.synchronize() begin = time.time() # reset meters DLLogger.flush() trainer.get_throughput_meter().reset() for i, sample in enumerate(itr): if i < num_batches - 1 and (i + 1) % update_freq > 0: # buffer updates according to --update-freq trainer.train_step(sample, update_params=False, last_step=(i == len(itr)-1)) continue else: trainer.train_step(sample, update_params=True, last_step=(i == len(itr)-1)) # ignore the first mini-batch in words-per-second calculation if i == 0: trainer.get_throughput_meter().reset() reset_perf_meters() if (i+1) % args.log_interval == 0: DLLogger.flush() if trainer.get_num_updates() >= max_update: break torch.cuda.synchronize() print('Epoch time:', time.time() - begin) # Print epoch stats and reset training meters DLLogger.log(step=trainer.get_num_updates(), data={'speed': trainer.get_throughput_meter().avg}, verbosity=0) DLLogger.flush() def validate(args, trainer, datasets, subsets): """Evaluate the model on the validation set(s) and return the losses.""" valid_losses = [] for subset in subsets: if len(subsets) > 1: print('Validating on \'{}\' subset'.format(subset)) # Initialize data iterator itr = data.EpochBatchIterator( dataset=datasets[subset], max_tokens=args.max_tokens, max_sentences=args.max_sentences_valid, max_positions=args.max_positions, required_batch_size_multiple=8, seed=args.seed, num_shards=args.distributed_world_size, shard_id=args.distributed_rank, ).next_epoch_itr(shuffle=False) # reset validation loss meters DLLogger.flush() subset_losses = [] for sample in itr: loss = trainer.valid_step(sample) subset_losses.append(loss) subset_loss = sum(subset_losses)/len(subset_losses) DLLogger.flush() valid_losses.append(subset_loss) print(f'Validation loss on subset {subset}: {subset_loss}') return valid_losses def score(args, trainer, dataset, src_dict, tgt_dict, ref_file): torch.cuda.synchronize() begin = time.time() src_dict = deepcopy(src_dict) # This is necessary, generation of translations tgt_dict = deepcopy(tgt_dict) # alters target dictionary messing up with the rest of training model = trainer.get_model() # Initialize data iterator itr = data.EpochBatchIterator( dataset=dataset, max_tokens=None, max_sentences=max(8, min(math.ceil(1024/args.distributed_world_size), 128)), max_positions=args.max_positions, required_batch_size_multiple=8, num_shards=args.distributed_world_size, shard_id=args.distributed_rank, ).next_epoch_itr(shuffle=False) # Initialize generator gen_timer = StopwatchMeter() translator = SequenceGenerator( [model], tgt_dict.get_metadata(), maxlen=args.max_target_positions - 1, # do not include EOS token beam_size=args.beam, stop_early=(not args.no_early_stop), normalize_scores=(not args.unnormalized), len_penalty=args.lenpen, unk_penalty=args.unkpen, sampling=args.sampling, sampling_topk=args.sampling_topk, minlen=args.min_len, use_amp=args.amp, ) # Generate and compute BLEU predictions = [] translations = translator.generate_batched_itr( itr, maxlen_a=args.max_len_a, maxlen_b=args.max_len_b, cuda=True, timer=gen_timer, prefix_size=args.prefix_size, ) for sample_id, src_tokens, _, hypos in translations: # Process input and grount truth src_str = src_dict.string(src_tokens, args.remove_bpe) # Process top predictions for i, hypo in enumerate(hypos[:min(len(hypos), args.nbest)]): _, hypo_str, _ = utils.post_process_prediction( hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None, align_dict=None, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe ) # Score only the top hypothesis if i == 0: hypo_str = tokenizer.Tokenizer.detokenize(hypo_str, 'de') predictions.append('{}\t{}'.format(sample_id, hypo_str)) if args.distributed_world_size > 1: predictions = _all_gather_predictions(predictions) with open(os.path.join(args.data, ref_file), 'r') as reference: refs = [reference.readlines()] # reducing indexed predictions as strings is more memory efficient than reducing tuples predictions = [tuple(item.split('\t')) for item in predictions] predictions = [(int(item[0]), item[1]) for item in predictions] predictions.sort(key=lambda tup: tup[0]) predictions = [hypo[1] + ('\n' if hypo[1][-1] != '\n' else '') for hypo in predictions] sacrebleu_score = sacrebleu.corpus_bleu(predictions, refs, lowercase=not args.test_cased_bleu).score if args.save_predictions: os.makedirs(os.path.join(args.save_dir, 'predictions'), exist_ok=True) fname = ref_file + '.pred.update_{}'.format(trainer.get_num_updates()) save_path = os.path.join(args.save_dir, 'predictions', fname) with open(save_path, 'w') as f: f.write(''.join(predictions)) DLLogger.log(step=trainer.get_num_updates(), data={'inference tokens/s': float(args.distributed_world_size) / gen_timer.avg}, verbosity=0) DLLogger.flush() if gen_timer.sum != 0: print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format( len(predictions), gen_timer.n, gen_timer.sum, len(predictions) / gen_timer.sum, float(args.distributed_world_size)/gen_timer.avg )) torch.cuda.synchronize() print('| Eval completed in: {:.2f}s | {}CASED BLEU {:.2f}'.format( time.time()-begin, '' if args.test_cased_bleu else 'UN', sacrebleu_score )) return sacrebleu_score def _all_gather_predictions(predictions): ready = False all_ready = False reduced_predictions = [] max_size = 65000 while not all_ready: lst_len = len(predictions) size = 2000 # some extra space for python stuff n = 0 while n < lst_len: str_len = len(predictions[n].encode('utf8')) + 8 # per string pickle overhead if size + str_len >= max_size: break size += str_len n += 1 chunk = predictions[:n] predictions = predictions[n:] if not predictions: ready = True chunk = (ready, chunk) torch.cuda.synchronize() gathered = distributed_utils.all_gather_list(chunk, max_size=65000) torch.cuda.synchronize() reduced_predictions += [t[1] for t in gathered] all_ready = all([t[0] for t in gathered]) reduced_predictions = [item for sublist in reduced_predictions for item in sublist] return reduced_predictions def save_checkpoint(args, trainer, epoch_itr, val_loss): if epoch_itr.epoch % args.save_interval != 0: return if args.no_save or not distributed_utils.is_master(args): return epoch = epoch_itr.epoch end_of_epoch = epoch_itr.end_of_epoch() checkpoint_conds = collections.OrderedDict() checkpoint_conds['checkpoint{}.pt'.format(epoch)] = end_of_epoch and not args.no_epoch_checkpoints checkpoint_conds['checkpoint_best.pt'] = ( val_loss is not None and (not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best) ) checkpoint_conds['checkpoint_last.pt'] = True # keep this last so that it's a symlink prev_best = getattr(save_checkpoint, 'best', val_loss) if val_loss is not None: save_checkpoint.best = min(val_loss, prev_best) extra_state = { 'best': save_checkpoint.best, 'train_iterator': epoch_itr.state_dict(), 'val_loss': val_loss, } extra_state.update(save_checkpoint.extra_items) checkpoints = [os.path.join(args.save_dir, 'checkpoints', fn) for fn, cond in checkpoint_conds.items() if cond] if checkpoints: for cp in checkpoints: trainer.save_checkpoint(cp, extra_state) def add_extra_items_to_checkpoint(items): if not hasattr(save_checkpoint, 'extra_items'): save_checkpoint.extra_items = {} save_checkpoint.extra_items.update(items) def load_checkpoint(args, trainer, epoch_itr): """Load a checkpoint and replay dataloader to match.""" os.makedirs(os.path.join(args.save_dir, 'checkpoints'), exist_ok=True) checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file) if os.path.isfile(checkpoint_path): extra_state = trainer.load_checkpoint(checkpoint_path) if extra_state is not None: # replay train iterator to match checkpoint epoch_itr.load_state_dict(extra_state['train_iterator']) print('| loaded checkpoint {} (epoch {} @ {} updates)'.format( checkpoint_path, epoch_itr.epoch, trainer.get_num_updates())) trainer.lr_step(epoch_itr.epoch) trainer.lr_step_update(trainer.get_num_updates()) if 'best' in extra_state: save_checkpoint.best = extra_state['best'] if __name__ == '__main__': parser = options.get_training_parser() ARGS = options.parse_args_and_arch(parser) distributed_utils.distributed_init(ARGS) main(ARGS)
PyTorch/Classification/GPUNet/triton/08ms-D/runner
runner
__main__
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import pathlib from typing import List if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ...runner.config import Config from ...runner.executor import Executor from ...runner.finalizer import ExperimentFinalizer from ...runner.maintainer import DockerMaintainer from ...runner.preparer import ExperimentPreparer from ...runner.runner_proxy import RunnerProxy from .pipeline_impl import pipeline class ExperimentRunner(RunnerProxy): """ Experiment Runner proxy for runner wrapper """ maintainer_cls = DockerMaintainer executor_cls = Executor preparer_cls = ExperimentPreparer finalizer_cls = ExperimentFinalizer def execute(config_path: str, devices: List[str]): if len(devices) == 0: devices = ["0"] config = Config.from_file(config_path) runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices) runner.start() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.") parser.add_argument( "--devices", type=str, nargs="*", required=False, help="Path to configuration file with details." ) args = parser.parse_args() config_path = args.config_path devices = args.devices execute(config_path, devices)
TensorFlow2/Detection/Efficientdet/efficientnet/blocks
blocks
__init__
from efficientnet.blocks.conv2d_block import conv2d_block from efficientnet.blocks.mb_conv_block import mb_conv_block __all__ = ['conv2d_block', 'mb_conv_block']
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
ssd_mobilenet_v1_0.75_depth_300x300_coco14_sync
# SSD with Mobilenet v1 0.75 depth multiplied feature extractor and focal loss. # Trained on COCO14, initialized from Imagenet classification checkpoint # Achieves 17.5 mAP on COCO14 minival dataset. Doubling the number of training # steps gets to 18.4. # This config is TPU compatible model { ssd { inplace_batchnorm_update: true freeze_batchnorm: false num_classes: 90 box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true use_matmul_gather: true } } similarity_calculator { iou_similarity { } } encode_background_as_zeros: true anchor_generator { ssd_anchor_generator { num_layers: 6 min_scale: 0.2 max_scale: 0.95 aspect_ratios: 1.0 aspect_ratios: 2.0 aspect_ratios: 0.5 aspect_ratios: 3.0 aspect_ratios: 0.3333 } } image_resizer { fixed_shape_resizer { height: 300 width: 300 } } box_predictor { convolutional_box_predictor { min_depth: 0 max_depth: 0 num_layers_before_predictor: 0 use_dropout: false dropout_keep_probability: 0.8 kernel_size: 1 box_code_size: 4 apply_sigmoid_to_scores: false class_prediction_bias_init: -4.6 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { random_normal_initializer { stddev: 0.01 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.97, epsilon: 0.001, } } } } feature_extractor { type: 'ssd_mobilenet_v1' min_depth: 16 depth_multiplier: 0.75 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.97, epsilon: 0.001, } } override_base_feature_extractor_hyperparams: true } loss { classification_loss { weighted_sigmoid_focal { alpha: 0.75, gamma: 2.0 } } localization_loss { weighted_smooth_l1 { delta: 1.0 } } classification_weight: 1.0 localization_weight: 1.0 } normalize_loss_by_num_matches: true normalize_loc_loss_by_codesize: true post_processing { batch_non_max_suppression { score_threshold: 1e-8 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 100 } score_converter: SIGMOID } } } train_config: { fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" batch_size: 2048 sync_replicas: true startup_delay_steps: 0 replicas_to_aggregate: 8 num_steps: 10000 data_augmentation_options { random_horizontal_flip { } } data_augmentation_options { ssd_random_crop { } } optimizer { momentum_optimizer: { learning_rate: { cosine_decay_learning_rate { learning_rate_base: 0.9 total_steps: 10000 warmup_learning_rate: 0.3 warmup_steps: 300 } } momentum_optimizer_value: 0.9 } use_moving_average: false } max_number_of_boxes: 100 unpad_groundtruth_tensors: false } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-00000-of-00100" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" } eval_config: { metrics_set: "coco_detection_metrics" use_moving_averages: false num_examples: 8000 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-00000-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" shuffle: false num_readers: 1 }
TensorFlow2/Detection/Efficientdet/scripts/D0
D0
inference-benchmark
#!/bin/bash # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. mkdir -p /tmp/inference-benchmark python3 inspector.py \ --model_name=efficientdet-d0 \ --mode=benchmark \ --batch_size=${BS:-8} \ --use_xla=True \ --amp=${AMP:-True} \ 2>&1 | tee /tmp/inference-benchmark/infer-benchmark.log
PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/bermuda
bermuda
utils
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import Counter from typing import Callable, Dict, List import networkx as nx from ..core import ShapeSpec def infer_precision( nx_graph: nx.Graph, input_names: List[str], output_names: List[str], get_node_dtype_fn: Callable, ): node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes] node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]] dtypes_counter = Counter(node_dtypes) return dtypes_counter.most_common()[0][0] def get_shapes_with_dynamic_axes(dataloader, batch_size_dim=0): def _set_dynamic_shapes(t, shapes): for k, v in t.items(): shape = list(v.shape) for dim, s in enumerate(shape): if shapes[k][dim] != -1 and shapes[k][dim] != s: shapes[k][dim] = -1 ## get all shapes from input and output tensors input_shapes = {} output_shapes = {} for batch in dataloader: _, x, y = batch for k, v in x.items(): input_shapes[k] = list(v.shape) for k, v in y.items(): output_shapes[k] = list(v.shape) break # based on max <max_num_iters> iterations, check which # dimensions differ to determine dynamic_axes max_num_iters = 100 for idx, batch in enumerate(dataloader): if idx >= max_num_iters: break _, x, y = batch _set_dynamic_shapes(x, input_shapes) _set_dynamic_shapes(y, output_shapes) return input_shapes, output_shapes def get_dynamic_axes(dataloader, batch_size_dim=0): input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim) all_shapes = {**input_shapes, **output_shapes} dynamic_axes = {} for k, shape in all_shapes.items(): for idx, s in enumerate(shape): if s == -1: dynamic_axes[k] = {idx: k + "_" + str(idx)} for k, v in all_shapes.items(): if k in dynamic_axes: dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)}) else: dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)} return dynamic_axes def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]: def init_counters_and_shapes(x, counters, min_shapes, max_shapes): for k, v in x.items(): counters[k] = Counter() min_shapes[k] = [float("inf")] * v.ndim max_shapes[k] = [float("-inf")] * v.ndim counters = {} min_shapes: Dict[str, tuple] = {} max_shapes: Dict[str, tuple] = {} for idx, batch in enumerate(dataloader): ids, x, y = batch if idx == 0: init_counters_and_shapes(x, counters, min_shapes, max_shapes) for k, v in x.items(): shape = v.shape counters[k][shape] += 1 min_shapes[k] = tuple([min(a, b) for a, b in zip(min_shapes[k], shape)]) max_shapes[k] = tuple([max(a, b) for a, b in zip(max_shapes[k], shape)]) opt_shapes: Dict[str, tuple] = {} for k, v in counters.items(): opt_shapes[k] = v.most_common(1)[0][0] shapes = {} for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes shapes[k] = ShapeSpec( min=(1,) + min_shapes[k][1:], max=(max_batch_size,) + max_shapes[k][1:], opt=(max_batch_size,) + opt_shapes[k][1:], ) return shapes
PyTorch/SpeechSynthesis/Tacotron2/waveglow
waveglow
model
# ***************************************************************************** # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # ***************************************************************************** import torch torch._C._jit_set_autocast_mode(False) import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable @torch.jit.script def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels : int): n_channels_int = n_channels in_act = input_a + input_b t_act = torch.tanh(in_act[:, :n_channels_int, :]) s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) acts = t_act * s_act return acts class Invertible1x1Conv(torch.nn.Module): """ The layer outputs both the convolution, and the log determinant of its weight matrix. If reverse=True it does convolution with inverse """ def __init__(self, c): super(Invertible1x1Conv, self).__init__() self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0, bias=False) # Sample a random orthonormal matrix to initialize weights W = torch.linalg.qr(torch.FloatTensor(c, c).normal_())[0] # Ensure determinant is 1.0 not -1.0 if torch.det(W) < 0: W[:, 0] = -1 * W[:, 0] W = W.view(c, c, 1) W = W.contiguous() self.conv.weight.data = W def forward(self, z): # shape batch_size, group_size, n_of_groups = z.size() W = self.conv.weight.squeeze() # Forward computation log_det_W = batch_size * n_of_groups * torch.logdet(W.unsqueeze(0).float()).squeeze() z = self.conv(z) return z, log_det_W def infer(self, z): self._invert() return F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0) def _invert(self): if not hasattr(self, 'W_inverse'): W = self.conv.weight.squeeze() self.W_inverse = W.float().inverse().unsqueeze(-1).to(W.dtype) class WN(torch.nn.Module): """ This is the WaveNet like layer for the affine coupling. The primary difference from WaveNet is the convolutions need not be causal. There is also no dilation size reset. The dilation only doubles on each layer """ def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels, kernel_size): super(WN, self).__init__() assert(kernel_size % 2 == 1) assert(n_channels % 2 == 0) self.n_layers = n_layers self.n_channels = n_channels self.in_layers = torch.nn.ModuleList() self.res_skip_layers = torch.nn.ModuleList() self.cond_layers = torch.nn.ModuleList() start = torch.nn.Conv1d(n_in_channels, n_channels, 1) start = torch.nn.utils.weight_norm(start, name='weight') self.start = start # Initializing last layer to 0 makes the affine coupling layers # do nothing at first. This helps with training stability end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1) end.weight.data.zero_() end.bias.data.zero_() self.end = end for i in range(n_layers): dilation = 2 ** i padding = int((kernel_size * dilation - dilation) / 2) in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size, dilation=dilation, padding=padding) in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') self.in_layers.append(in_layer) cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels, 1) cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') self.cond_layers.append(cond_layer) # last one is not necessary if i < n_layers - 1: res_skip_channels = 2 * n_channels else: res_skip_channels = n_channels res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1) res_skip_layer = torch.nn.utils.weight_norm( res_skip_layer, name='weight') self.res_skip_layers.append(res_skip_layer) def forward(self, audio, spect): audio = self.start(audio) output = 0 for i, (in_layer, cond_layer, res_skip_layer) in enumerate( zip(self.in_layers, self.cond_layers, self.res_skip_layers)): acts = fused_add_tanh_sigmoid_multiply( in_layer(audio), cond_layer(spect), self.n_channels) res_skip_acts = res_skip_layer(acts) if i < self.n_layers - 1: audio = res_skip_acts[:, :self.n_channels, :] + audio skip_acts = res_skip_acts[:, self.n_channels:, :] else: skip_acts = res_skip_acts output += skip_acts return self.end(output) class WaveGlow(torch.nn.Module): def __init__(self, n_mel_channels, n_flows, n_group, n_early_every, n_early_size, WN_config): super(WaveGlow, self).__init__() self.upsample = torch.nn.ConvTranspose1d(n_mel_channels, n_mel_channels, 1024, stride=256) assert(n_group % 2 == 0) self.n_flows = n_flows self.n_group = n_group self.n_early_every = n_early_every self.n_early_size = n_early_size self.WN = torch.nn.ModuleList() self.convinv = torch.nn.ModuleList() n_half = int(n_group / 2) # Set up layers with the right sizes based on how many dimensions # have been output already n_remaining_channels = n_group for k in range(n_flows): if k % self.n_early_every == 0 and k > 0: n_half = n_half - int(self.n_early_size / 2) n_remaining_channels = n_remaining_channels - self.n_early_size self.convinv.append(Invertible1x1Conv(n_remaining_channels)) self.WN.append(WN(n_half, n_mel_channels * n_group, **WN_config)) self.n_remaining_channels = n_remaining_channels def forward(self, forward_input): """ forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames forward_input[1] = audio: batch x time """ spect, audio = forward_input # Upsample spectrogram to size of audio spect = self.upsample(spect) assert(spect.size(2) >= audio.size(1)) if spect.size(2) > audio.size(1): spect = spect[:, :, :audio.size(1)] spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3) spect = spect.contiguous().view(spect.size(0), spect.size(1), -1) spect = spect.permute(0, 2, 1) audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1) output_audio = [] log_s_list = [] log_det_W_list = [] for k in range(self.n_flows): if k % self.n_early_every == 0 and k > 0: output_audio.append(audio[:, :self.n_early_size, :]) audio = audio[:, self.n_early_size:, :] audio, log_det_W = self.convinv[k](audio) log_det_W_list.append(log_det_W) n_half = int(audio.size(1) // 2) audio_0 = audio[:, :n_half, :] audio_1 = audio[:, n_half:, :] output = self.WN[k](audio_0, spect) log_s = output[:, n_half:, :] b = output[:, :n_half, :] audio_1 = torch.exp(log_s) * audio_1 + b log_s_list.append(log_s) audio = torch.cat([audio_0, audio_1], 1) output_audio.append(audio) return torch.cat(output_audio, 1), log_s_list, log_det_W_list def infer(self, spect, sigma=1.0): spect = self.upsample(spect) # trim conv artifacts. maybe pad spec to kernel multiple time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0] spect = spect[:, :, :-time_cutoff] spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3) spect = spect.contiguous().view(spect.size(0), spect.size(1), -1) spect = spect.permute(0, 2, 1) audio = torch.randn(spect.size(0), self.n_remaining_channels, spect.size(2), device=spect.device).to(spect.dtype) audio = torch.autograd.Variable(sigma * audio) for k in reversed(range(self.n_flows)): n_half = int(audio.size(1) / 2) audio_0 = audio[:, :n_half, :] audio_1 = audio[:, n_half:, :] output = self.WN[k](audio_0, spect) s = output[:, n_half:, :] b = output[:, :n_half, :] audio_1 = (audio_1 - b) / torch.exp(s) audio = torch.cat([audio_0, audio_1], 1) audio = self.convinv[k].infer(audio) if k % self.n_early_every == 0 and k > 0: z = torch.randn(spect.size(0), self.n_early_size, spect.size( 2), device=spect.device).to(spect.dtype) audio = torch.cat((sigma * z, audio), 1) audio = audio.permute( 0, 2, 1).contiguous().view( audio.size(0), -1).data return audio def infer_onnx(self, spect, z, sigma=0.9): spect = self.upsample(spect) # trim conv artifacts. maybe pad spec to kernel multiple time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0] spect = spect[:, :, :-time_cutoff] length_spect_group = spect.size(2)//8 mel_dim = 80 batch_size = spect.size(0) spect = spect.view((batch_size, mel_dim, length_spect_group, self.n_group)) spect = spect.permute(0, 2, 1, 3) spect = spect.contiguous() spect = spect.view((batch_size, length_spect_group, self.n_group*mel_dim)) spect = spect.permute(0, 2, 1) spect = spect.contiguous() audio = z[:, :self.n_remaining_channels, :] z = z[:, self.n_remaining_channels:self.n_group, :] audio = sigma*audio for k in reversed(range(self.n_flows)): n_half = int(audio.size(1) // 2) audio_0 = audio[:, :n_half, :] audio_1 = audio[:, n_half:(n_half+n_half), :] output = self.WN[k](audio_0, spect) s = output[:, n_half:(n_half+n_half), :] b = output[:, :n_half, :] audio_1 = (audio_1 - b) / torch.exp(s) audio = torch.cat([audio_0, audio_1], 1) audio = self.convinv[k].infer(audio) if k % self.n_early_every == 0 and k > 0: audio = torch.cat((z[:, :self.n_early_size, :], audio), 1) z = z[:, self.n_early_size:self.n_group, :] audio = audio.permute(0,2,1).contiguous().view(batch_size, (length_spect_group * self.n_group)) return audio def _infer_ts(self, spect, sigma : float=1.0): spect = self.upsample(spect) # trim conv artifacts. maybe pad spec to kernel multiple time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0] spect = spect[:, :, :-time_cutoff] spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3) spect = spect.contiguous().view(spect.size(0), spect.size(1), -1) spect = spect.permute(0, 2, 1) audio = torch.randn(spect.size(0), self.n_remaining_channels, spect.size(2), device=spect.device, dtype=spect.dtype) audio *= sigma for kk, (wn, convinv) in enumerate(zip(self.WN_rev, self.convinv_rev)): k = self.n_flows - kk - 1 n_half = int(audio.size(1) / 2) audio_0 = audio[:, :n_half, :] audio_1 = audio[:, n_half:, :] output = wn(audio_0, spect) s = output[:, n_half:, :] b = output[:, :n_half, :] audio_1 = (audio_1 - b) / torch.exp(s) audio = torch.cat([audio_0, audio_1], 1) audio = convinv.infer(audio) if k % self.n_early_every == 0 and k > 0: z = torch.randn(spect.size(0), self.n_early_size, spect.size(2), device=spect.device, dtype=spect.dtype) audio = torch.cat((sigma * z, audio), 1) return audio.permute(0, 2, 1).contiguous().view(audio.size(0), -1).data def make_ts_scriptable(self, forward_is_infer=True): self.WN_rev = torch.nn.ModuleList(reversed(self.WN)) self.convinv_rev = torch.nn.ModuleList(reversed(self.convinv)) for conv in self.convinv_rev: conv._invert() self.infer = self._infer_ts if forward_is_infer: self.forward = self._infer_ts @staticmethod def remove_weightnorm(model): waveglow = model for WN in waveglow.WN: WN.start = torch.nn.utils.remove_weight_norm(WN.start) WN.in_layers = remove(WN.in_layers) WN.cond_layers = remove(WN.cond_layers) WN.res_skip_layers = remove(WN.res_skip_layers) return waveglow def remove(conv_list): new_conv_list = torch.nn.ModuleList() for old_conv in conv_list: old_conv = torch.nn.utils.remove_weight_norm(old_conv) new_conv_list.append(old_conv) return new_conv_list
TensorFlow/Segmentation/UNet_Industrial/model/blocks
blocks
__init__
#!/usr/bin/env python # -*- coding: utf-8 -*- # ============================================================================== # # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================== from model.blocks.activation_blck import activation_block from model.blocks.activation_blck import authorized_activation_fn from model.blocks.unet_downsample import downsample_unet_block from model.blocks.unet_upsample import upsample_unet_block from model.blocks.unet_bottleneck import bottleneck_unet_block from model.blocks.unet_io_blocks import input_unet_block from model.blocks.unet_io_blocks import output_unet_block __all__ = [ 'activation_block', 'authorized_activation_fn', 'upsample_unet_block', 'upsample_unet_block', 'bottleneck_unet_block', 'input_unet_block', 'output_unet_block', ]
PyTorch/Forecasting/TFT/triton/scripts/docker
docker
interactive
#!/usr/bin/env bash # Copyright (c) 2021-2022 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. DATASET_PATH=${1:-"/data/"} NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:=0} docker run -it --rm \ --runtime=nvidia \ -e NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES} \ --net=host \ --shm-size=1g \ --ulimit memlock=-1 \ --ulimit stack=67108864 \ --ipc=host \ -e WORKDIR="$(pwd)" \ -e PYTHONPATH="$(pwd)" \ -v ${DATASET_PATH}/processed/:"$(pwd)"/datasets/ \ -v "$(pwd)":"$(pwd)" \ -v /var/run/docker.sock:/var/run/docker.sock \ -w "$(pwd)" \ tft:latest bash
PyTorch/Classification/GPUNet/triton/085ms/runner
runner
start_NVIDIA-DGX-A100-(1x-A100-80GB)
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/bin/bash # Evaluate Runner python3 -m "triton.085ms.runner.__main__" \ --config-path "triton/085ms/runner/config_NVIDIA-DGX-A100-(1x-A100-80GB).yaml" \ --device 0
PyTorch/SpeechSynthesis/HiFiGAN/platform
platform
DGXA100_HiFi-GAN_AMP_1GPU
#!/bin/bash set -a : ${NUM_GPUS:=1} : ${BATCH_SIZE:=128} : ${GRAD_ACCUMULATION:=1} : ${AMP:=true} bash scripts/train_lj22khz.sh "$@" --no_amp_grouped_conv
Tools/PyTorch/TimeSeriesPredictionPlatform/models
models
tspp_xgboost
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import cudf import pandas as pd import pynvml import numpy as np import xgboost as xgb import os import glob import dask_cudf from distributed_utils import create_client class TSPPXGBoost(): def __init__(self, config): self.config = config self.models = [] def fit(self, train, label, valid, valid_label, **kwargs): X = xgb.DeviceQuantileDMatrix(cudf.from_pandas(train), label=cudf.from_pandas(label)) V = xgb.DMatrix(cudf.from_pandas(valid), label=cudf.from_pandas(valid_label)) model = xgb.train(params=self.config, dtrain=X, num_boost_round=self.config.n_rounds, evals=[(X, 'train'), (V, 'valid')], early_stopping_rounds=kwargs.get('patience', 5), verbose_eval=kwargs.get("log_interval", 25), ) self.models.append(model) def predict(self, test, i): model = self.models[i] X = xgb.DMatrix(cudf.from_pandas(test)) return model.predict(X) def save(self, path): os.makedirs(os.path.join(path, 'checkpoints'), exist_ok=True) for i in range(len(self.models)): model = self.models[i] model.save_model(os.path.join(path, f'checkpoints/xgb_{i+1}.model')) def load(self, path): self.models = [] for i in range(self.config.example_length - self.config.encoder_length): p = os.path.join(path, f'checkpoints/xgb_{i+1}.model') model = xgb.Booster() model.load_model(p) self.models.append(model) class TSPPDaskXGBoost(): def __init__(self, config): self.config = config self.models = [] self.client = create_client(config) self.npartitions = self.config.cluster.npartitions def fit(self, train, label, valid, valid_label, **kwargs): X = xgb.dask.DaskDeviceQuantileDMatrix(self.client, dask_cudf.from_cudf(cudf.from_pandas(train), npartitions=self.npartitions), label=dask_cudf.from_cudf(cudf.from_pandas(label), npartitions=self.npartitions)) V = xgb.dask.DaskDMatrix(self.client, dask_cudf.from_cudf(cudf.from_pandas(valid), npartitions=self.npartitions), label=dask_cudf.from_cudf(cudf.from_pandas(valid_label), npartitions=self.npartitions)) model = xgb.dask.train(client=self.client, params=self.config, dtrain=X, num_boost_round=self.config.n_rounds, evals=[(X, 'train'), (V, 'valid')], early_stopping_rounds=kwargs.get('patience', 5), verbose_eval=kwargs.get("log_interval", 25), ) self.models.append(model) self.client.restart() def predict(self, test, i): test = test.reset_index(drop=True) model = self.models[i] test = dask_cudf.from_cudf(cudf.from_pandas(test), npartitions=self.npartitions) test = xgb.dask.DaskDMatrix(self.client, test) out = xgb.dask.predict(self.client, model, test) return out.compute() def save(self, path): os.makedirs(os.path.join(path, 'checkpoints'), exist_ok=True) for i in range(len(self.models)): model = self.models[i] model['booster'].save_model(os.path.join(path, f'checkpoints/xgb_{i+1}.model')) def load(self, path): self.models = [] for i in range(self.config.example_length - self.config.encoder_length): p = os.path.join(path, f'checkpoints/xgb_{i+1}.model') model = {'booster': xgb.dask.Booster()} model['booster'].load_model(p) self.models.append(model)
TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact
tensorflow_dot_based_interact
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from tensorflow_dot_based_interact.python.ops.dot_based_interact_ops import dot_based_interact
PyTorch/Segmentation/MaskRCNN/pytorch/configs
configs
e2e_mask_rcnn_R_50_C4_1x
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" RPN: PRE_NMS_TOP_N_TEST: 6000 POST_NMS_TOP_N_TEST: 1000 ROI_MASK_HEAD: PREDICTOR: "MaskRCNNC4Predictor" SHARE_BOX_FEATURE_EXTRACTOR: True MASK_ON: True DATASETS: TRAIN: ("coco_2014_train", "coco_2014_valminusminival") TEST: ("coco_2014_minival",) SOLVER: BASE_LR: 0.01 WEIGHT_DECAY: 0.0001 STEPS: (120000, 160000) MAX_ITER: 180000 IMS_PER_BATCH: 8
PyTorch/Classification/ConvNets/resnext101-32x4d/training/AMP
AMP
DGXA100_resnext101-32x4d_AMP_90E
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnext101-32x4d --precision AMP --mode convergence --platform DGXA100 /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
PyTorch/SpeechSynthesis/FastPitch/platform
platform
DGX1_FastPitch_AMP_4GPU
#!/bin/bash set -a : ${NUM_GPUS:=4} : ${BATCH_SIZE:=16} : ${GRAD_ACCUMULATION:=4} : ${AMP:=true} bash scripts/train.sh "$@"
TensorFlow/Detection/SSD/models/research/object_detection/box_coders
box_coders
mean_stddev_box_coder_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.box_coder.mean_stddev_boxcoder.""" import tensorflow as tf from object_detection.box_coders import mean_stddev_box_coder from object_detection.core import box_list class MeanStddevBoxCoderTest(tf.test.TestCase): def testGetCorrectRelativeCodesAfterEncoding(self): box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]] boxes = box_list.BoxList(tf.constant(box_corners)) expected_rel_codes = [[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]] prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]]) priors = box_list.BoxList(prior_means) coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) rel_codes = coder.encode(boxes, priors) with self.test_session() as sess: rel_codes_out = sess.run(rel_codes) self.assertAllClose(rel_codes_out, expected_rel_codes) def testGetCorrectBoxesAfterDecoding(self): rel_codes = tf.constant([[0.0, 0.0, 0.0, 0.0], [-5.0, -5.0, -5.0, -3.0]]) expected_box_corners = [[0.0, 0.0, 0.5, 0.5], [0.0, 0.0, 0.5, 0.5]] prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.8]]) priors = box_list.BoxList(prior_means) coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1) decoded_boxes = coder.decode(rel_codes, priors) decoded_box_corners = decoded_boxes.get() with self.test_session() as sess: decoded_out = sess.run(decoded_box_corners) self.assertAllClose(decoded_out, expected_box_corners) if __name__ == '__main__': tf.test.main()
TensorFlow2/Recommendation/WideAndDeep/tests
tests
test_with_opts
#!/bin/bash set -e set -x NAMES=${1:-'*.yaml'} TARGET=/wd/tests/feature_specs/${NAMES} OPTIONS=${2-""} for file in ${TARGET}; do echo "${file}"; done for fspec_file in ${TARGET}; do CSV_DIR=/tmp/generated_data/csv_dir TRANS_DIR=/tmp/generated_data/trans_dir # generate data based on fspec python gen_csv.py --feature_spec_in ${fspec_file} --output ${CSV_DIR} --size 393216 #131072*3, bsize*3 cp ${fspec_file} ${CSV_DIR}/feature_spec.yaml python transcode.py --input ${CSV_DIR} --output ${TRANS_DIR} --chunk_size 16384 # to get 8 partitions out of 131072 rows EMBEDDING_SIZES_FILE=${TRANS_DIR}/embedding_sizes.json python gen_embedding_sizes.py --feature_spec_in ${fspec_file} --output ${EMBEDDING_SIZES_FILE} #horovodrun -np 1 sh hvd_wrapper.sh python main.py --dataset_path ${TRANS_DIR} ${OPTIONS} --disable_map_calculation --embedding_sizes_file ${EMBEDDING_SIZES_FILE} --num_epochs 10 horovodrun -np 8 sh hvd_wrapper.sh python main.py --dataset_path ${TRANS_DIR} ${OPTIONS} --disable_map_calculation --embedding_sizes_file ${EMBEDDING_SIZES_FILE} --num_epochs 6 --xla --amp rm -r ${CSV_DIR} rm -r ${TRANS_DIR} done #usage: bash tests/test_with_opts.sh
PyTorch/SpeechSynthesis/FastPitch/triton
triton
run_inference_on_triton
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" To infer the model deployed on Triton, you can use `run_inference_on_triton.py` script. It sends a request with data obtained from pointed data loader and dumps received data into npz files. Those files are stored in directory pointed by `--output-dir` argument. Currently, the client communicates with the Triton server asynchronously using GRPC protocol. Example call: ```shell script python ./triton/run_inference_on_triton.py \ --server-url localhost:8001 \ --model-name ResNet50 \ --model-version 1 \ --dump-labels \ --output-dir /results/dump_triton ``` """ import argparse import functools import logging import queue import threading import time from pathlib import Path from typing import Optional from tqdm import tqdm # pytype: disable=import-error try: from tritonclient import utils as client_utils # noqa: F401 from tritonclient.grpc import ( InferenceServerClient, InferInput, InferRequestedOutput, ) except ImportError: import tritongrpcclient as grpc_client from tritongrpcclient import ( InferenceServerClient, InferInput, InferRequestedOutput, ) # pytype: enable=import-error # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file from .deployment_toolkit.dump import NpzWriter LOGGER = logging.getLogger("run_inference_on_triton") class AsyncGRPCTritonRunner: DEFAULT_MAX_RESP_WAIT_S = 120 DEFAULT_MAX_UNRESP_REQS = 128 DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min def __init__( self, server_url: str, model_name: str, model_version: str, *, dataloader, verbose=False, resp_wait_s: Optional[float] = None, max_unresponded_reqs: Optional[int] = None, ): self._server_url = server_url self._model_name = model_name self._model_version = model_version self._dataloader = dataloader self._verbose = verbose self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s self._max_unresp_reqs = self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_reqs is None else max_unresponded_reqs self._results = queue.Queue() self._processed_all = False self._errors = [] self._num_waiting_for = 0 self._sync = threading.Condition() self._req_thread = threading.Thread(target=self.req_loop, daemon=True) def __iter__(self): self._req_thread.start() timeout_s = 0.050 # check flags processed_all and error flags every 50ms while True: try: ids, x, y_pred, y_real = self._results.get(timeout=timeout_s) yield ids, x, y_pred, y_real except queue.Empty: shall_stop = self._processed_all or self._errors if shall_stop: break LOGGER.debug("Waiting for request thread to stop") self._req_thread.join() if self._errors: error_msg = "\n".join(map(str, self._errors)) raise RuntimeError(error_msg) def _on_result(self, ids, x, y_real, output_names, result, error): with self._sync: if error: self._errors.append(error) else: y_pred = {name: result.as_numpy(name) for name in output_names} self._results.put((ids, x, y_pred, y_real)) self._num_waiting_for -= 1 self._sync.notify_all() def req_loop(self): client = InferenceServerClient(self._server_url, verbose=self._verbose) self._errors = self._verify_triton_state(client) if self._errors: return LOGGER.debug( f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!" ) model_config = client.get_model_config(self._model_name, self._model_version) model_metadata = client.get_model_metadata(self._model_name, self._model_version) LOGGER.info(f"Model config {model_config}") LOGGER.info(f"Model metadata {model_metadata}") inputs = {tm.name: tm for tm in model_metadata.inputs} outputs = {tm.name: tm for tm in model_metadata.outputs} output_names = list(outputs) outputs_req = [InferRequestedOutput(name) for name in outputs] self._num_waiting_for = 0 for ids, x, y_real in self._dataloader: infer_inputs = [] for name in inputs: data = x[name] infer_input = InferInput(name, data.shape, inputs[name].datatype) target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype) data = data.astype(target_np_dtype) infer_input.set_data_from_numpy(data) infer_inputs.append(infer_input) with self._sync: def _check_can_send(): return self._num_waiting_for < self._max_unresp_reqs can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t) if not can_send: error_msg = f"Runner could not send new requests for {self._response_wait_t}s" self._errors.append(error_msg) break callback = functools.partial(AsyncGRPCTritonRunner._on_result, self, ids, x, y_real, output_names) client.async_infer( model_name=self._model_name, model_version=self._model_version, inputs=infer_inputs, outputs=outputs_req, callback=callback, ) self._num_waiting_for += 1 # wait till receive all requested data with self._sync: def _all_processed(): LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs") return self._num_waiting_for == 0 self._processed_all = self._sync.wait_for(_all_processed, self.DEFAULT_MAX_FINISH_WAIT_S) if not self._processed_all: error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server" self._errors.append(error_msg) LOGGER.debug("Finished request thread") def _verify_triton_state(self, triton_client): errors = [] if not triton_client.is_server_live(): errors.append(f"Triton server {self._server_url} is not live") elif not triton_client.is_server_ready(): errors.append(f"Triton server {self._server_url} is not ready") elif not triton_client.is_model_ready(self._model_name, self._model_version): errors.append(f"Model {self._model_name}:{self._model_version} is not ready") return errors def _parse_args(): parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False) parser.add_argument( "--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)" ) parser.add_argument("--model-name", help="The name of the model used for inference.", required=True) parser.add_argument("--model-version", help="The version of the model used for inference.", required=True) parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True) parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False) parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved") parser.add_argument("--response-wait-time", required=False, help="Maximal time to wait for response", type=int, default=120) parser.add_argument( "--max-unresponded-requests", required=False, help="Maximal number of unresponded requests", type=int, default=128 ) args, *_ = parser.parse_known_args() get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) args = parser.parse_args() return args def main(): args = _parse_args() log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" log_level = logging.INFO if not args.verbose else logging.DEBUG logging.basicConfig(level=log_level, format=log_format) LOGGER.info(f"args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) runner = AsyncGRPCTritonRunner( args.server_url, args.model_name, args.model_version, dataloader=dataloader_fn(), verbose=False, resp_wait_s=args.response_wait_time, max_unresponded_reqs=args.max_unresponded_requests, ) with NpzWriter(output_dir=args.output_dir) as writer: start = time.time() for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10): data = _verify_and_format_dump(args, ids, x, y_pred, y_real) writer.write(**data) stop = time.time() LOGGER.info(f"\nThe inference took {stop - start:0.3f}s") def _verify_and_format_dump(args, ids, x, y_pred, y_real): data = {"outputs": y_pred, "ids": {"ids": ids}} if args.dump_inputs: data["inputs"] = x if args.dump_labels: if not y_real: raise ValueError( "Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument" ) data["labels"] = y_real return data if __name__ == "__main__": main()
TensorFlow/Detection/SSD/models/research/slim/nets/mobilenet
mobilenet
mobilenet_v2
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of Mobilenet V2. Architecture: https://arxiv.org/abs/1801.04381 The base model gives 72.2% accuracy on ImageNet, with 300MMadds, 3.4 M parameters. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import functools import tensorflow as tf from nets.mobilenet import conv_blocks as ops from nets.mobilenet import mobilenet as lib slim = tf.contrib.slim op = lib.op expand_input = ops.expand_input_by_factor # pyformat: disable # Architecture: https://arxiv.org/abs/1801.04381 V2_DEF = dict( defaults={ # Note: these parameters of batch norm affect the architecture # that's why they are here and not in training_scope. (slim.batch_norm,): {'center': True, 'scale': True}, (slim.conv2d, slim.fully_connected, slim.separable_conv2d): { 'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6 }, (ops.expanded_conv,): { 'expansion_size': expand_input(6), 'split_expansion': 1, 'normalizer_fn': slim.batch_norm, 'residual': True }, (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'} }, spec=[ op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]), op(ops.expanded_conv, expansion_size=expand_input(1, divisible_by=1), num_outputs=16), op(ops.expanded_conv, stride=2, num_outputs=24), op(ops.expanded_conv, stride=1, num_outputs=24), op(ops.expanded_conv, stride=2, num_outputs=32), op(ops.expanded_conv, stride=1, num_outputs=32), op(ops.expanded_conv, stride=1, num_outputs=32), op(ops.expanded_conv, stride=2, num_outputs=64), op(ops.expanded_conv, stride=1, num_outputs=64), op(ops.expanded_conv, stride=1, num_outputs=64), op(ops.expanded_conv, stride=1, num_outputs=64), op(ops.expanded_conv, stride=1, num_outputs=96), op(ops.expanded_conv, stride=1, num_outputs=96), op(ops.expanded_conv, stride=1, num_outputs=96), op(ops.expanded_conv, stride=2, num_outputs=160), op(ops.expanded_conv, stride=1, num_outputs=160), op(ops.expanded_conv, stride=1, num_outputs=160), op(ops.expanded_conv, stride=1, num_outputs=320), op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280) ], ) # pyformat: enable @slim.add_arg_scope def mobilenet(input_tensor, num_classes=1001, depth_multiplier=1.0, scope='MobilenetV2', conv_defs=None, finegrain_classification_mode=False, min_depth=None, divisible_by=None, activation_fn=None, **kwargs): """Creates mobilenet V2 network. Inference mode is created by default. To create training use training_scope below. with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()): logits, endpoints = mobilenet_v2.mobilenet(input_tensor) Args: input_tensor: The input tensor num_classes: number of classes depth_multiplier: The multiplier applied to scale number of channels in each layer. Note: this is called depth multiplier in the paper but the name is kept for consistency with slim's model builder. scope: Scope of the operator conv_defs: Allows to override default conv def. finegrain_classification_mode: When set to True, the model will keep the last layer large even for small multipliers. Following https://arxiv.org/abs/1801.04381 suggests that it improves performance for ImageNet-type of problems. *Note* ignored if final_endpoint makes the builder exit earlier. min_depth: If provided, will ensure that all layers will have that many channels after application of depth multiplier. divisible_by: If provided will ensure that all layers # channels will be divisible by this number. activation_fn: Activation function to use, defaults to tf.nn.relu6 if not specified. **kwargs: passed directly to mobilenet.mobilenet: prediction_fn- what prediction function to use. reuse-: whether to reuse variables (if reuse set to true, scope must be given). Returns: logits/endpoints pair Raises: ValueError: On invalid arguments """ if conv_defs is None: conv_defs = V2_DEF if 'multiplier' in kwargs: raise ValueError('mobilenetv2 doesn\'t support generic ' 'multiplier parameter use "depth_multiplier" instead.') if finegrain_classification_mode: conv_defs = copy.deepcopy(conv_defs) if depth_multiplier < 1: conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier if activation_fn: conv_defs = copy.deepcopy(conv_defs) defaults = conv_defs['defaults'] conv_defaults = ( defaults[(slim.conv2d, slim.fully_connected, slim.separable_conv2d)]) conv_defaults['activation_fn'] = activation_fn depth_args = {} # NB: do not set depth_args unless they are provided to avoid overriding # whatever default depth_multiplier might have thanks to arg_scope. if min_depth is not None: depth_args['min_depth'] = min_depth if divisible_by is not None: depth_args['divisible_by'] = divisible_by with slim.arg_scope((lib.depth_multiplier,), **depth_args): return lib.mobilenet( input_tensor, num_classes=num_classes, conv_defs=conv_defs, scope=scope, multiplier=depth_multiplier, **kwargs) mobilenet.default_image_size = 224 def wrapped_partial(func, *args, **kwargs): partial_func = functools.partial(func, *args, **kwargs) functools.update_wrapper(partial_func, func) return partial_func # Wrappers for mobilenet v2 with depth-multipliers. Be noticed that # 'finegrain_classification_mode' is set to True, which means the embedding # layer will not be shrinked when given a depth-multiplier < 1.0. mobilenet_v2_140 = wrapped_partial(mobilenet, depth_multiplier=1.4) mobilenet_v2_050 = wrapped_partial(mobilenet, depth_multiplier=0.50, finegrain_classification_mode=True) mobilenet_v2_035 = wrapped_partial(mobilenet, depth_multiplier=0.35, finegrain_classification_mode=True) @slim.add_arg_scope def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs): """Creates base of the mobilenet (no pooling and no logits) .""" return mobilenet(input_tensor, depth_multiplier=depth_multiplier, base_only=True, **kwargs) def training_scope(**kwargs): """Defines MobilenetV2 training scope. Usage: with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()): logits, endpoints = mobilenet_v2.mobilenet(input_tensor) with slim. Args: **kwargs: Passed to mobilenet.training_scope. The following parameters are supported: weight_decay- The weight decay to use for regularizing the model. stddev- Standard deviation for initialization, if negative uses xavier. dropout_keep_prob- dropout keep probability bn_decay- decay for the batch norm moving averages. Returns: An `arg_scope` to use for the mobilenet v2 model. """ return lib.training_scope(**kwargs) __all__ = ['training_scope', 'mobilenet_base', 'mobilenet', 'V2_DEF']
PyTorch/SpeechRecognition/Jasper/triton/model_repo_configs/fp32/jasper-onnx
jasper-onnx
config
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. name: "jasper-onnx" platform: "onnxruntime_onnx" default_model_filename: "model.onnx" max_batch_size: 8#MAX_BATCH input [ { name: "input__0" data_type: TYPE_FP32 dims: [64, -1] } ] output [ { name: "output__0" data_type: TYPE_FP32 dims: [-1, 29 ] } ] instance_group { count: 1#NUM_ENGINES gpus: 0 kind: KIND_GPU } #db#dynamic_batching { #db# preferred_batch_size: 8#MAX_BATCH #db# max_queue_delay_microseconds: #MAX_QUEUE #db#}
PyTorch/Segmentation/nnUNet/data_loading
data_loading
data_module
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import os import numpy as np from pytorch_lightning import LightningDataModule from sklearn.model_selection import KFold from utils.utils import get_config_file, get_task_code, print0 from data_loading.dali_loader import fetch_dali_loader class DataModule(LightningDataModule): def __init__(self, args): super().__init__() self.args = args self.data_path = get_data_path(args) self.kfold = get_kfold_splitter(args.nfolds) self.kwargs = { "dim": self.args.dim, "seed": self.args.seed, "gpus": self.args.gpus, "nvol": self.args.nvol, "layout": self.args.layout, "overlap": self.args.overlap, "benchmark": self.args.benchmark, "num_workers": self.args.num_workers, "oversampling": self.args.oversampling, "test_batches": self.args.test_batches, "train_batches": self.args.train_batches, "invert_resampled_y": self.args.invert_resampled_y, "patch_size": get_config_file(self.args)["patch_size"], } self.train_imgs, self.train_lbls, self.val_imgs, self.val_lbls, self.test_imgs = ([],) * 5 def setup(self, stage=None): meta = load_data(self.data_path, "*_meta.npy") orig_lbl = load_data(self.data_path, "*_orig_lbl.npy") imgs, lbls = load_data(self.data_path, "*_x.npy"), load_data(self.data_path, "*_y.npy") self.test_imgs, test_meta = get_test_fnames(self.args, self.data_path, meta) if self.args.exec_mode != "predict" or self.args.benchmark: train_idx, val_idx = list(self.kfold.split(imgs))[self.args.fold] orig_lbl, meta = get_split(orig_lbl, val_idx), get_split(meta, val_idx) self.kwargs.update({"orig_lbl": orig_lbl, "meta": meta}) self.train_imgs, self.train_lbls = get_split(imgs, train_idx), get_split(lbls, train_idx) self.val_imgs, self.val_lbls = get_split(imgs, val_idx), get_split(lbls, val_idx) if self.args.gpus > 1: rank = int(os.getenv("LOCAL_RANK", "0")) self.val_imgs = self.val_imgs[rank :: self.args.gpus] self.val_lbls = self.val_lbls[rank :: self.args.gpus] else: self.kwargs.update({"meta": test_meta}) print0(f"{len(self.train_imgs)} training, {len(self.val_imgs)} validation, {len(self.test_imgs)} test examples") def train_dataloader(self): return fetch_dali_loader(self.train_imgs, self.train_lbls, self.args.batch_size, "train", **self.kwargs) def val_dataloader(self): return fetch_dali_loader(self.val_imgs, self.val_lbls, 1, "eval", **self.kwargs) def test_dataloader(self): if self.kwargs["benchmark"]: return fetch_dali_loader(self.train_imgs, self.train_lbls, self.args.val_batch_size, "test", **self.kwargs) return fetch_dali_loader(self.test_imgs, None, 1, "test", **self.kwargs) def get_split(data, idx): return list(np.array(data)[idx]) def load_data(path, files_pattern, non_empty=True): data = sorted(glob.glob(os.path.join(path, files_pattern))) if non_empty: assert len(data) > 0, f"No data found in {path} with pattern {files_pattern}" return data def get_kfold_splitter(nfolds): return KFold(n_splits=nfolds, shuffle=True, random_state=12345) def get_test_fnames(args, data_path, meta=None): kfold = get_kfold_splitter(args.nfolds) test_imgs = load_data(data_path, "*_x.npy", non_empty=False) if args.exec_mode == "predict" and "val" in data_path: _, val_idx = list(kfold.split(test_imgs))[args.fold] test_imgs = sorted(get_split(test_imgs, val_idx)) if meta is not None: meta = sorted(get_split(meta, val_idx)) return test_imgs, meta def get_data_path(args): if args.data != "/data": return args.data data_path = os.path.join(args.data, get_task_code(args)) if args.exec_mode == "predict" and not args.benchmark: data_path = os.path.join(data_path, "test") return data_path
PyTorch/Recommendation/DLRM/dlrm/cuda_src/dot_based_interact_volta
dot_based_interact_volta
dot_based_interact_pytorch_types
#include <torch/extension.h> #include <torch/types.h> #include <stdexcept> #include "../dot_based_interact/dot_based_interact_fp16_fwd.cu" #include "../dot_based_interact/dot_based_interact_fp16_bwd.cu" #include "../dot_based_interact/dot_based_interact_fp32_fwd.cu" #include "../dot_based_interact/dot_based_interact_fp32_bwd.cu" torch::Tensor dotBasedInteractFwdTorch(torch::Tensor input, torch::Tensor bottom_mlp_output) { //input includes bottom_mlp_output along with the embeddings, at the first position auto size = input.sizes(); auto batch_size = size[0]; auto num_rows = size[1]; auto num_cols = size[2]; uint raw_output_size = ((num_rows * (num_rows - 1)) >> 1) + num_cols; uint output_size = ((raw_output_size-1)/8 + 1)*8; //round up to multiple of 8 int64_t outputShape[2] = {batch_size, output_size}; auto output = torch::empty(c10::IntArrayRef(outputShape), input.options()); if (input.scalar_type() == torch::ScalarType::Half && bottom_mlp_output.scalar_type() == torch::ScalarType::Half) { dotBasedInteractFwd(input.contiguous().data_ptr<at::Half>(), bottom_mlp_output.contiguous().data_ptr<at::Half>(), output.contiguous().data_ptr<at::Half>(), batch_size, num_rows, num_cols); } else if (input.scalar_type() == torch::ScalarType::Float && bottom_mlp_output.scalar_type() == torch::ScalarType::Float) { dotBasedInteractF32Fwd(input.contiguous().data_ptr<float>(), bottom_mlp_output.contiguous().data_ptr<float>(), output.contiguous().data_ptr<float>(), batch_size, num_rows, num_cols); } else { throw std::invalid_argument("Invalid input type."); } return output; } std::vector<torch::Tensor> dotBasedInteractBwdTorch(torch::Tensor input, torch::Tensor upstreamGrad) { auto size = input.sizes(); auto batch_size = size[0]; auto num_rows = size[1]; auto num_cols = size[2]; auto outputGrad = torch::empty_like(input); int64_t outputShape[2] = {batch_size, num_cols}; auto mlp_grad = torch::empty(c10::IntArrayRef(outputShape), input.options()); if (input.scalar_type() == torch::ScalarType::Half && upstreamGrad.scalar_type() == torch::ScalarType::Half) { dotBasedInteractBwd(input.contiguous().data_ptr<at::Half>(), upstreamGrad.contiguous().data_ptr<at::Half>(), outputGrad.contiguous().data_ptr<at::Half>(), mlp_grad.contiguous().data_ptr<at::Half>(), batch_size, num_rows, num_cols); } else if (input.scalar_type() == torch::ScalarType::Float && upstreamGrad.scalar_type() == torch::ScalarType::Float) { dotBasedInteractF32Bwd(input.contiguous().data_ptr<float>(), upstreamGrad.contiguous().data_ptr<float>(), outputGrad.contiguous().data_ptr<float>(), mlp_grad.contiguous().data_ptr<float>(), batch_size, num_rows, num_cols); } else { throw std::invalid_argument("Invalid input type."); } return {outputGrad, mlp_grad}; }
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils
utils
memory_manager
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pynvml import psutil class MemoryManager(object): def __init__(self, gpus=None): pynvml.nvmlInit() def __new__(cls): if not hasattr(cls, 'instance'): cls.instance = super(MemoryManager, cls).__new__(cls) return cls.instance def get_available_gpus(self): return pynvml.nvmlDeviceGetCount() def get_memory_info_on_gpu(self, gpu_id): h = pynvml.nvmlDeviceGetHandleByIndex(gpu_id) return pynvml.nvmlDeviceGetMemoryInfo(h) def get_min_available_across_gpus_memory(self, gpus): total = None used = 0 for g_id in range(gpus): info = self.get_memory_info_on_gpu(g_id) if total is None: total = info.total else: assert total == info.total used = max(used, info.used) return total - used def get_available_virtual_memory(self): return psutil.virtual_memory().available
TensorFlow2/LanguageModeling/BERT/data
data
GooglePretrainedWeightDownloader
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import hashlib import os import urllib.request import tarfile class GooglePretrainedWeightDownloader: def __init__(self, save_path): self.save_path = save_path + '/google_pretrained_weights' if not os.path.exists(self.save_path): os.makedirs(self.save_path) # Download urls self.model_urls = { 'bert_base_uncased': ('https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/uncased_L-12_H-768_A-12.tar.gz', 'uncased_L-12_H-768_A-12.tar.gz'), 'bert_large_uncased': ('https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/uncased_L-24_H-1024_A-16.tar.gz', 'uncased_L-24_H-1024_A-16.tar.gz'), # 'bert_base_cased': ('https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/cased_L-12_H-768_A-12.tar.gz', 'cased_L-12_H-768_A-12.tar.gz'), # 'bert_large_cased': ('https://storage.googleapis.com/cloud-tpu-checkpoints/bert/keras_bert/cased_L-24_H-1024_A-16.tar.gz', 'cased_L-24_H-1024_A-16.tar.gz'), # 'bert_base_multilingual_cased': ('https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip', 'multi_cased_L-12_H-768_A-12.zip'), # 'bert_large_multilingual_uncased': ('https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12.zip'), # 'bert_base_chinese': ('https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12.zip') } # SHA256sum verification for file download integrity (and checking for changes from the download source over time) self.bert_base_uncased_sha = { 'bert_config.json': '7b4e5f53efbd058c67cda0aacfafb340113ea1b5797d9ce6ee411704ba21fcbc', 'bert_model.ckpt.data-00000-of-00001': 'f8d2e9873133ea4d252662be01a074fb6b9e115d5fd1e3678d385cf65cf5210f', 'bert_model.ckpt.index': '06a6b8cdff0e61f62f8f24946a607aa6f5ad9b969c1b85363541ab144f80c767', # 'checkpoint': 'da4c827756174a576abc3490e385fa8a36600cf5eb7bbea29315cf1f4ad59639', 'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3', } self.bert_large_uncased_sha = { 'bert_config.json': 'bfa42236d269e2aeb3a6d30412a33d15dbe8ea597e2b01dc9518c63cc6efafcb', 'bert_model.ckpt.data-00000-of-00001': '9aa66efcbbbfd87fc173115c4f906a42a70d26ca4ca1e318358e4de81dbddb0b', 'bert_model.ckpt.index': '1811d5b68b2fd1a8c5d2961b2691eb626d75c4e789079eb1ba3649aa3fff7336', # 'checkpoint': 'da4c827756174a576abc3490e385fa8a36600cf5eb7bbea29315cf1f4ad59639', 'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3', } self.bert_base_cased_sha = { 'bert_config.json': 'f11dfb757bea16339a33e1bf327b0aade6e57fd9c29dc6b84f7ddb20682f48bc', 'bert_model.ckpt.data-00000-of-00001': 'ed0febc0fbcd2b7ef9f02112e00cb26c5de2086bca26c07b48b09c723446bc85', 'bert_model.ckpt.index': 'af085a027ef3686466c9b662f9174129401bb4bc49856c917c02322ab7ca26d5', 'checkpoint': 'da4c827756174a576abc3490e385fa8a36600cf5eb7bbea29315cf1f4ad59639', 'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02', } self.bert_large_cased_sha = { 'bert_config.json': '7adb2125c8225da495656c982fd1c5f64ba8f20ad020838571a3f8a954c2df57', 'bert_model.ckpt.data-00000-of-00001': '1f96efeac7c8728e2bacb8ec6230f5ed42a26f5aa6b6b0a138778c190adf2a0b', 'bert_model.ckpt.index': '373ed159af87775ce549239649bfc4df825bffab0da31620575dab44818443c3', 'checkpoint': 'da4c827756174a576abc3490e385fa8a36600cf5eb7bbea29315cf1f4ad59639', 'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02', } self.bert_base_multilingual_cased_sha = { 'bert_config.json': 'e76c3964bc14a8bb37a5530cdc802699d2f4a6fddfab0611e153aa2528f234f0', 'bert_model.ckpt.data-00000-of-00001': '55b8a2df41f69c60c5180e50a7c31b7cdf6238909390c4ddf05fbc0d37aa1ac5', 'bert_model.ckpt.index': '7d8509c2a62b4e300feb55f8e5f1eef41638f4998dd4d887736f42d4f6a34b37', 'bert_model.ckpt.meta': '95e5f1997e8831f1c31e5cf530f1a2e99f121e9cd20887f2dce6fe9e3343e3fa', 'vocab.txt': 'fe0fda7c425b48c516fc8f160d594c8022a0808447475c1a7c6d6479763f310c', } self.bert_large_multilingual_uncased_sha = { 'bert_config.json': '49063bb061390211d2fdd108cada1ed86faa5f90b80c8f6fdddf406afa4c4624', 'bert_model.ckpt.data-00000-of-00001': '3cd83912ebeb0efe2abf35c9f1d5a515d8e80295e61c49b75c8853f756658429', 'bert_model.ckpt.index': '87c372c1a3b1dc7effaaa9103c80a81b3cbab04c7933ced224eec3b8ad2cc8e7', 'bert_model.ckpt.meta': '27f504f34f02acaa6b0f60d65195ec3e3f9505ac14601c6a32b421d0c8413a29', 'vocab.txt': '87b44292b452f6c05afa49b2e488e7eedf79ea4f4c39db6f2f4b37764228ef3f', } self.bert_base_chinese_sha = { 'bert_config.json': '7aaad0335058e2640bcb2c2e9a932b1cd9da200c46ea7b8957d54431f201c015', 'bert_model.ckpt.data-00000-of-00001': '756699356b78ad0ef1ca9ba6528297bcb3dd1aef5feadd31f4775d7c7fc989ba', 'bert_model.ckpt.index': '46315546e05ce62327b3e2cd1bed22836adcb2ff29735ec87721396edb21b82e', 'bert_model.ckpt.meta': 'c0f8d51e1ab986604bc2b25d6ec0af7fd21ff94cf67081996ec3f3bf5d823047', 'vocab.txt': '45bbac6b341c319adc98a532532882e91a9cefc0329aa57bac9ae761c27b291c', } # Relate SHA to urls for loop below self.model_sha = { 'bert_base_uncased': self.bert_base_uncased_sha, 'bert_large_uncased': self.bert_large_uncased_sha, # 'bert_base_cased': self.bert_base_cased_sha, # 'bert_large_cased': self.bert_large_cased_sha, # 'bert_base_multilingual_cased': self.bert_base_multilingual_cased_sha, # 'bert_large_multilingual_uncased': self.bert_large_multilingual_uncased_sha, # 'bert_base_chinese': self.bert_base_chinese_sha } # Helper to get sha256sum of a file def sha256sum(self, filename): h = hashlib.sha256() b = bytearray(128*1024) mv = memoryview(b) with open(filename, 'rb', buffering=0) as f: for n in iter(lambda : f.readinto(mv), 0): h.update(mv[:n]) return h.hexdigest() def download(self): # Iterate over urls: download, unzip, verify sha256sum found_mismatch_sha = False for model in self.model_urls: url = self.model_urls[model][0] file = self.save_path + '/' + self.model_urls[model][1] print('Downloading', url) response = urllib.request.urlopen(url) with open(file, 'wb') as handle: handle.write(response.read()) print('Unzipping', file) tf = tarfile.open(file) tf.extractall(self.save_path) sha_dict = self.model_sha[model] for extracted_file in sha_dict: sha = sha_dict[extracted_file] if sha != self.sha256sum(file[:-7] + '/' + extracted_file): found_mismatch_sha = True print('SHA256sum does not match on file:', extracted_file, 'from download url:', url) else: print(file[:-7] + '/' + extracted_file, '\t', 'verified') if not found_mismatch_sha: print("All downloads pass sha256sum verification.") def serialize(self): pass def deserialize(self): pass def listAvailableWeights(self): print("Available Weight Datasets") for item in self.model_urls: print(item) def listLocallyStoredWeights(self): pass
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
mask_rcnn_resnet50_atrous_coco
# Mask R-CNN with Resnet-50 (v1), Atrous version # Configured for MSCOCO Dataset. # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. model { faster_rcnn { num_classes: 90 image_resizer { keep_aspect_ratio_resizer { min_dimension: 800 max_dimension: 1365 } } number_of_stages: 3 feature_extractor { type: 'faster_rcnn_resnet50' first_stage_features_stride: 8 } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 8 width_stride: 8 } } first_stage_atrous_rate: 2 first_stage_box_predictor_conv_hyperparams { op: CONV regularizer { l2_regularizer { weight: 0.0 } } initializer { truncated_normal_initializer { stddev: 0.01 } } } first_stage_nms_score_threshold: 0.0 first_stage_nms_iou_threshold: 0.7 first_stage_max_proposals: 300 first_stage_localization_loss_weight: 2.0 first_stage_objectness_loss_weight: 1.0 initial_crop_size: 14 maxpool_kernel_size: 2 maxpool_stride: 2 second_stage_box_predictor { mask_rcnn_box_predictor { use_dropout: false dropout_keep_probability: 1.0 predict_instance_masks: true mask_height: 33 mask_width: 33 mask_prediction_conv_depth: 0 mask_prediction_num_conv_layers: 4 fc_hyperparams { op: FC regularizer { l2_regularizer { weight: 0.0 } } initializer { variance_scaling_initializer { factor: 1.0 uniform: true mode: FAN_AVG } } } conv_hyperparams { op: CONV regularizer { l2_regularizer { weight: 0.0 } } initializer { truncated_normal_initializer { stddev: 0.01 } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.0 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } second_stage_localization_loss_weight: 2.0 second_stage_classification_loss_weight: 1.0 second_stage_mask_prediction_loss_weight: 4.0 } } train_config: { batch_size: 1 optimizer { momentum_optimizer: { learning_rate: { manual_step_learning_rate { initial_learning_rate: 0.0003 schedule { step: 900000 learning_rate: .00003 } schedule { step: 1200000 learning_rate: .000003 } } } momentum_optimizer_value: 0.9 } use_moving_average: false } gradient_clipping_by_norm: 10.0 fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" from_detection_checkpoint: true # Note: The below line limits the training process to 200K steps, which we # empirically found to be sufficient enough to train the pets dataset. This # effectively bypasses the learning rate schedule (the learning rate will # never decay). Remove the below line to train indefinitely. num_steps: 200000 data_augmentation_options { random_horizontal_flip { } } } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" load_instance_masks: true mask_type: PNG_MASKS } eval_config: { num_examples: 8000 # Note: The below line limits the evaluation process to 10 evaluations. # Remove the below line to evaluate indefinitely. max_evals: 10 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" load_instance_masks: true mask_type: PNG_MASKS shuffle: false num_readers: 1 }
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
vrd_evaluation_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow_models.object_detection.utils.vrd_evaluation.""" import numpy as np import tensorflow as tf from object_detection.core import standard_fields from object_detection.utils import vrd_evaluation class VRDRelationDetectionEvaluatorTest(tf.test.TestCase): def test_vrdrelation_evaluator(self): self.vrd_eval = vrd_evaluation.VRDRelationDetectionEvaluator() image_key1 = 'img1' groundtruth_box_tuples1 = np.array( [([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 2, 2, 3])], dtype=vrd_evaluation.vrd_box_data_type) groundtruth_class_tuples1 = np.array( [(1, 2, 3), (1, 4, 3)], dtype=vrd_evaluation.label_data_type) groundtruth_verified_labels1 = np.array([1, 2, 3, 4, 5], dtype=int) self.vrd_eval.add_single_ground_truth_image_info( image_key1, { standard_fields.InputDataFields.groundtruth_boxes: groundtruth_box_tuples1, standard_fields.InputDataFields.groundtruth_classes: groundtruth_class_tuples1, standard_fields.InputDataFields.groundtruth_image_classes: groundtruth_verified_labels1 }) image_key2 = 'img2' groundtruth_box_tuples2 = np.array( [([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type) groundtruth_class_tuples2 = np.array( [(1, 4, 3)], dtype=vrd_evaluation.label_data_type) self.vrd_eval.add_single_ground_truth_image_info( image_key2, { standard_fields.InputDataFields.groundtruth_boxes: groundtruth_box_tuples2, standard_fields.InputDataFields.groundtruth_classes: groundtruth_class_tuples2, }) image_key3 = 'img3' groundtruth_box_tuples3 = np.array( [([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type) groundtruth_class_tuples3 = np.array( [(1, 2, 4)], dtype=vrd_evaluation.label_data_type) self.vrd_eval.add_single_ground_truth_image_info( image_key3, { standard_fields.InputDataFields.groundtruth_boxes: groundtruth_box_tuples3, standard_fields.InputDataFields.groundtruth_classes: groundtruth_class_tuples3, }) image_key = 'img1' detected_box_tuples = np.array( [([0, 0.3, 1, 1], [1.1, 1, 2, 2]), ([0, 0, 1, 1], [1, 1, 2, 2]), ([0.5, 0, 1, 1], [1, 1, 3, 3])], dtype=vrd_evaluation.vrd_box_data_type) detected_class_tuples = np.array( [(1, 2, 5), (1, 2, 3), (1, 6, 3)], dtype=vrd_evaluation.label_data_type) detected_scores = np.array([0.7, 0.8, 0.9], dtype=float) self.vrd_eval.add_single_detected_image_info( image_key, { standard_fields.DetectionResultFields.detection_boxes: detected_box_tuples, standard_fields.DetectionResultFields.detection_scores: detected_scores, standard_fields.DetectionResultFields.detection_classes: detected_class_tuples }) metrics = self.vrd_eval.evaluate() self.assertAlmostEqual(metrics['VRDMetric_Relationships_weightedAP@0.5IOU'], 0.25) self.assertAlmostEqual(metrics['VRDMetric_Relationships_mAP@0.5IOU'], 0.1666666666666666) self.assertAlmostEqual(metrics['VRDMetric_Relationships_AP@0.5IOU/3'], 0.3333333333333333) self.assertAlmostEqual(metrics['VRDMetric_Relationships_AP@0.5IOU/4'], 0) self.assertAlmostEqual(metrics['VRDMetric_Relationships_Recall@50@0.5IOU'], 0.25) self.assertAlmostEqual(metrics['VRDMetric_Relationships_Recall@100@0.5IOU'], 0.25) self.vrd_eval.clear() self.assertFalse(self.vrd_eval._image_ids) class VRDPhraseDetectionEvaluatorTest(tf.test.TestCase): def test_vrdphrase_evaluator(self): self.vrd_eval = vrd_evaluation.VRDPhraseDetectionEvaluator() image_key1 = 'img1' groundtruth_box_tuples1 = np.array( [([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 2, 2, 3])], dtype=vrd_evaluation.vrd_box_data_type) groundtruth_class_tuples1 = np.array( [(1, 2, 3), (1, 4, 3)], dtype=vrd_evaluation.label_data_type) groundtruth_verified_labels1 = np.array([1, 2, 3, 4, 5], dtype=int) self.vrd_eval.add_single_ground_truth_image_info( image_key1, { standard_fields.InputDataFields.groundtruth_boxes: groundtruth_box_tuples1, standard_fields.InputDataFields.groundtruth_classes: groundtruth_class_tuples1, standard_fields.InputDataFields.groundtruth_image_classes: groundtruth_verified_labels1 }) image_key2 = 'img2' groundtruth_box_tuples2 = np.array( [([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type) groundtruth_class_tuples2 = np.array( [(1, 4, 3)], dtype=vrd_evaluation.label_data_type) self.vrd_eval.add_single_ground_truth_image_info( image_key2, { standard_fields.InputDataFields.groundtruth_boxes: groundtruth_box_tuples2, standard_fields.InputDataFields.groundtruth_classes: groundtruth_class_tuples2, }) image_key3 = 'img3' groundtruth_box_tuples3 = np.array( [([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type) groundtruth_class_tuples3 = np.array( [(1, 2, 4)], dtype=vrd_evaluation.label_data_type) self.vrd_eval.add_single_ground_truth_image_info( image_key3, { standard_fields.InputDataFields.groundtruth_boxes: groundtruth_box_tuples3, standard_fields.InputDataFields.groundtruth_classes: groundtruth_class_tuples3, }) image_key = 'img1' detected_box_tuples = np.array( [([0, 0.3, 0.5, 0.5], [0.3, 0.3, 1.0, 1.0]), ([0, 0, 1.2, 1.2], [0.0, 0.0, 2.0, 2.0]), ([0.5, 0, 1, 1], [1, 1, 3, 3])], dtype=vrd_evaluation.vrd_box_data_type) detected_class_tuples = np.array( [(1, 2, 5), (1, 2, 3), (1, 6, 3)], dtype=vrd_evaluation.label_data_type) detected_scores = np.array([0.7, 0.8, 0.9], dtype=float) self.vrd_eval.add_single_detected_image_info( image_key, { standard_fields.DetectionResultFields.detection_boxes: detected_box_tuples, standard_fields.DetectionResultFields.detection_scores: detected_scores, standard_fields.DetectionResultFields.detection_classes: detected_class_tuples }) metrics = self.vrd_eval.evaluate() self.assertAlmostEqual(metrics['VRDMetric_Phrases_weightedAP@0.5IOU'], 0.25) self.assertAlmostEqual(metrics['VRDMetric_Phrases_mAP@0.5IOU'], 0.1666666666666666) self.assertAlmostEqual(metrics['VRDMetric_Phrases_AP@0.5IOU/3'], 0.3333333333333333) self.assertAlmostEqual(metrics['VRDMetric_Phrases_AP@0.5IOU/4'], 0) self.assertAlmostEqual(metrics['VRDMetric_Phrases_Recall@50@0.5IOU'], 0.25) self.assertAlmostEqual(metrics['VRDMetric_Phrases_Recall@100@0.5IOU'], 0.25) self.vrd_eval.clear() self.assertFalse(self.vrd_eval._image_ids) class VRDDetectionEvaluationTest(tf.test.TestCase): def setUp(self): self.vrd_eval = vrd_evaluation._VRDDetectionEvaluation( matching_iou_threshold=0.5) image_key1 = 'img1' groundtruth_box_tuples1 = np.array( [([0, 0, 1, 1], [1, 1, 2, 2]), ([0, 0, 1, 1], [1, 2, 2, 3])], dtype=vrd_evaluation.vrd_box_data_type) groundtruth_class_tuples1 = np.array( [(1, 2, 3), (1, 4, 3)], dtype=vrd_evaluation.label_data_type) self.vrd_eval.add_single_ground_truth_image_info( image_key1, groundtruth_box_tuples1, groundtruth_class_tuples1) image_key2 = 'img2' groundtruth_box_tuples2 = np.array( [([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type) groundtruth_class_tuples2 = np.array( [(1, 4, 3)], dtype=vrd_evaluation.label_data_type) self.vrd_eval.add_single_ground_truth_image_info( image_key2, groundtruth_box_tuples2, groundtruth_class_tuples2) image_key3 = 'img3' groundtruth_box_tuples3 = np.array( [([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type) groundtruth_class_tuples3 = np.array( [(1, 2, 4)], dtype=vrd_evaluation.label_data_type) self.vrd_eval.add_single_ground_truth_image_info( image_key3, groundtruth_box_tuples3, groundtruth_class_tuples3) image_key = 'img1' detected_box_tuples = np.array( [([0, 0.3, 1, 1], [1.1, 1, 2, 2]), ([0, 0, 1, 1], [1, 1, 2, 2])], dtype=vrd_evaluation.vrd_box_data_type) detected_class_tuples = np.array( [(1, 2, 3), (1, 2, 3)], dtype=vrd_evaluation.label_data_type) detected_scores = np.array([0.7, 0.8], dtype=float) self.vrd_eval.add_single_detected_image_info( image_key, detected_box_tuples, detected_scores, detected_class_tuples) metrics = self.vrd_eval.evaluate() expected_weighted_average_precision = 0.25 expected_mean_average_precision = 0.16666666666666 expected_precision = np.array([1., 0.5], dtype=float) expected_recall = np.array([0.25, 0.25], dtype=float) expected_recall_50 = 0.25 expected_recall_100 = 0.25 expected_median_rank_50 = 0 expected_median_rank_100 = 0 self.assertAlmostEqual(expected_weighted_average_precision, metrics.weighted_average_precision) self.assertAlmostEqual(expected_mean_average_precision, metrics.mean_average_precision) self.assertAlmostEqual(expected_mean_average_precision, metrics.mean_average_precision) self.assertAllClose(expected_precision, metrics.precisions) self.assertAllClose(expected_recall, metrics.recalls) self.assertAlmostEqual(expected_recall_50, metrics.recall_50) self.assertAlmostEqual(expected_recall_100, metrics.recall_100) self.assertAlmostEqual(expected_median_rank_50, metrics.median_rank_50) self.assertAlmostEqual(expected_median_rank_100, metrics.median_rank_100) if __name__ == '__main__': tf.test.main()
PyTorch/Classification/ConvNets/se-resnext101-32x4d/training/FP32
FP32
DGX1V_se-resnext101-32x4d_FP32_250E
python ./multiproc.py --nproc_per_node 8 ./launch.py --model se-resnext101-32x4d --precision FP32 --mode convergence --platform DGX1V /imagenet --workspace ${1:-./} --raport-file raport.json
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/waveglow
waveglow
waveGlowBuilder
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "waveGlowBuilder.h" #include "logging.h" #include "trtUtils.h" #include "NvInfer.h" #include "NvOnnxParser.h" #include "cuda_runtime.h" #include <cassert> #include <iostream> #include <stdexcept> #include <string> using namespace nvinfer1; using IParser = nvonnxparser::IParser; namespace tts { /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const char* const ENGINE_NAME = "waveglow_chunk160_fp16"; constexpr const char* const MEL_INPUT_NAME = "spect"; constexpr const char* const Z_INPUT_NAME = "z"; constexpr const char* const OUTPUT_NAME = "audio"; } // namespace /****************************************************************************** * CONSTRUCTORS / DESTRUCTOR ************************************************** *****************************************************************************/ WaveGlowBuilder::WaveGlowBuilder(const std::string& modelPath, std::shared_ptr<ILogger> logger) : mOnnxModelPath(modelPath) , mLogger(logger) { } /****************************************************************************** * PUBLIC METHODS ************************************************************* *****************************************************************************/ TRTPtr<ICudaEngine> WaveGlowBuilder::build( IBuilder& builder, const int maxBatchSize, const bool useFP16) { // configure tensor-rt objects TRTPtr<INetworkDefinition> network(builder.createNetworkV2( 1U << static_cast<int>( NetworkDefinitionCreationFlag::kEXPLICIT_BATCH))); network->setName("WaveGlow"); TRTPtr<IParser> parser{nvonnxparser::createParser(*network, *mLogger)}; if (!parser->parseFromFile(mOnnxModelPath.c_str(), static_cast<int>(ILogger::Severity::kERROR))) { throw std::runtime_error("Failed to parse ONNX network. Parser failed."); } if (network->getOutput(0) == nullptr) { throw std::runtime_error("Failed to parse ONNX network. Null output."); } // set all inputs to FP32 for (int i = 0; i < network->getNbInputs(); ++i) { if (network->getInput(i)->getType() != DataType::kFLOAT) { network->getInput(i)->setType(DataType::kFLOAT); if (network->getInput(i)->getType() != DataType::kFLOAT) { throw std::runtime_error("WaveGlowBuilder expects non 32-bit input for " + std::to_string(i)); } } } // set output to FP32 and name ITensor* output = network->getOutput(0); if (output->getType() == DataType::kHALF) { // convert from half to full network->unmarkOutput(*output); IIdentityLayer* const identLayer = network->addIdentity(*output); identLayer->setPrecision(DataType::kFLOAT); output = identLayer->getOutput(0); assert(output->getType() == DataType::kFLOAT); network->markOutput(*output); std::cout << "Changing output to be 32-bit" << std::endl; } output->setName(OUTPUT_NAME); // rename z network->getInput(1)->setName(Z_INPUT_NAME); // add transpose to mel spectrogram ITensor* const spectInput = network->getInput(0); spectInput->setName(MEL_INPUT_NAME); TRTPtr<IBuilderConfig> config(builder.createBuilderConfig()); config->setMaxWorkspaceSize(1ULL << 29); if (useFP16) { config->setFlag(BuilderFlag::kFP16); } Dims minSpectDims = spectInput->getDimensions(); minSpectDims.d[0] = 1; Dims maxSpectDims = minSpectDims; maxSpectDims.d[0] = maxBatchSize; TRTUtils::printDimensions("spect", minSpectDims); TRTUtils::printDimensions("spect", maxSpectDims); IOptimizationProfile* const optProfile = builder.createOptimizationProfile(); optProfile->setDimensions(MEL_INPUT_NAME, OptProfileSelector::kMIN, minSpectDims); optProfile->setDimensions(MEL_INPUT_NAME, OptProfileSelector::kMAX, maxSpectDims); optProfile->setDimensions(MEL_INPUT_NAME, OptProfileSelector::kOPT, minSpectDims); config->addOptimizationProfile(optProfile); TRTPtr<ICudaEngine> engine( builder.buildEngineWithConfig(*network, *config)); if (!engine) { throw std::runtime_error("Failed to build WaveGlow engine."); } return engine; } } // namespace tts
PyTorch/SpeechSynthesis/FastPitch/common/text
text
acronyms
import re from . import cmudict _letter_to_arpabet = { 'A': 'EY1', 'B': 'B IY1', 'C': 'S IY1', 'D': 'D IY1', 'E': 'IY1', 'F': 'EH1 F', 'G': 'JH IY1', 'H': 'EY1 CH', 'I': 'AY1', 'J': 'JH EY1', 'K': 'K EY1', 'L': 'EH1 L', 'M': 'EH1 M', 'N': 'EH1 N', 'O': 'OW1', 'P': 'P IY1', 'Q': 'K Y UW1', 'R': 'AA1 R', 'S': 'EH1 S', 'T': 'T IY1', 'U': 'Y UW1', 'V': 'V IY1', 'X': 'EH1 K S', 'Y': 'W AY1', 'W': 'D AH1 B AH0 L Y UW0', 'Z': 'Z IY1', 's': 'Z' } # Acronyms that should not be expanded hardcoded_acronyms = [ 'BMW', 'MVD', 'WDSU', 'GOP', 'UK', 'AI', 'GPS', 'BP', 'FBI', 'HD', 'CES', 'LRA', 'PC', 'NBA', 'BBL', 'OS', 'IRS', 'SAC', 'UV', 'CEO', 'TV', 'CNN', 'MSS', 'GSA', 'USSR', 'DNA', 'PRS', 'TSA', 'US', 'GPU', 'USA', 'FPCC', 'CIA'] # Words and acronyms that should be read as regular words, e.g., NATO, HAPPY, etc. uppercase_whiteliset = [] acronyms_exceptions = { 'NVIDIA': 'N.VIDIA', } non_uppercase_exceptions = { 'email': 'e-mail', } # must ignore roman numerals _acronym_re = re.compile(r'([a-z]*[A-Z][A-Z]+)s?\.?') _non_uppercase_re = re.compile(r'\b({})\b'.format('|'.join(non_uppercase_exceptions.keys())), re.IGNORECASE) def _expand_acronyms_to_arpa(m, add_spaces=True): acronym = m.group(0) # remove dots if they exist acronym = re.sub('\.', '', acronym) acronym = "".join(acronym.split()) arpabet = cmudict.lookup(acronym) if arpabet is None: acronym = list(acronym) arpabet = ["{" + _letter_to_arpabet[letter] + "}" for letter in acronym] # temporary fix if arpabet[-1] == '{Z}' and len(arpabet) > 1: arpabet[-2] = arpabet[-2][:-1] + ' ' + arpabet[-1][1:] del arpabet[-1] arpabet = ' '.join(arpabet) elif len(arpabet) == 1: arpabet = "{" + arpabet[0] + "}" else: arpabet = acronym return arpabet def normalize_acronyms(text): text = re.sub(_acronym_re, _expand_acronyms_to_arpa, text) return text def expand_acronyms(m): text = m.group(1) if text in acronyms_exceptions: text = acronyms_exceptions[text] elif text in uppercase_whiteliset: text = text else: text = '.'.join(text) + '.' if 's' in m.group(0): text = text + '\'s' if text[-1] != '.' and m.group(0)[-1] == '.': return text + '.' else: return text def spell_acronyms(text): text = re.sub(_non_uppercase_re, lambda m: non_uppercase_exceptions[m.group(0).lower()], text) text = re.sub(_acronym_re, expand_acronyms, text) return text
PyTorch/SpeechRecognition/Jasper/triton/model_repo_configs/fp32/jasper-ts-trace-ensemble
jasper-ts-trace-ensemble
config
name: "jasper-ts-trace-ensemble" platform: "ensemble" max_batch_size: 8#MAX_BATCH input { name: "AUDIO_SIGNAL" data_type: TYPE_FP32 dims: -1#AUDIO_LENGTH } input { name: "NUM_SAMPLES" data_type: TYPE_INT32 dims: [ 1 ] } output { name: "TRANSCRIPT" data_type: TYPE_INT32 dims: [-1] } ensemble_scheduling { step { model_name: "feature-extractor-ts-trace" model_version: -1 input_map { key: "input__0" value: "AUDIO_SIGNAL" } input_map { key: "input__1" value: "NUM_SAMPLES" } output_map { key: "output__0" value: "AUDIO_FEATURES" } } step { model_name: "jasper-ts-trace" model_version: -1 input_map { key: "input__0" value: "AUDIO_FEATURES" } output_map { key: "output__0" value: "CHARACTER_PROBABILITIES" } } step { model_name: "decoder-ts-script" model_version: -1 input_map { key: "input__0" value: "CHARACTER_PROBABILITIES" } output_map { key: "output__0" value: "TRANSCRIPT" } } }
PyTorch/SpeechSynthesis/Tacotron2/waveglow
waveglow
entrypoints
# ***************************************************************************** # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # ***************************************************************************** import urllib.request import torch import os import sys # from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py def checkpoint_from_distributed(state_dict): """ Checks whether checkpoint was generated by DistributedDataParallel. DDP wraps model in additional "module.", it needs to be unwrapped for single GPU inference. :param state_dict: model's state dict """ ret = False for key, _ in state_dict.items(): if key.find('module.') != -1: ret = True break return ret # from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py def unwrap_distributed(state_dict): """ Unwraps model from DistributedDataParallel. DDP wraps model in additional "module.", it needs to be removed for single GPU inference. :param state_dict: model's state dict """ new_state_dict = {} for key, value in state_dict.items(): new_key = key.replace('module.1.', '') new_key = new_key.replace('module.', '') new_state_dict[new_key] = value return new_state_dict def _download_checkpoint(checkpoint, force_reload): model_dir = os.path.join(torch.hub._get_torch_home(), 'checkpoints') if not os.path.exists(model_dir): os.makedirs(model_dir) ckpt_file = os.path.join(model_dir, os.path.basename(checkpoint)) if not os.path.exists(ckpt_file) or force_reload: sys.stderr.write('Downloading checkpoint from {}\n'.format(checkpoint)) urllib.request.urlretrieve(checkpoint, ckpt_file) return ckpt_file def nvidia_waveglow(pretrained=True, **kwargs): """Constructs a WaveGlow model (nn.module with additional infer(input) method). For detailed information on model input and output, training recipies, inference and performance visit: github.com/NVIDIA/DeepLearningExamples and/or ngc.nvidia.com Args: pretrained (bool): If True, returns a model pretrained on LJ Speech dataset. model_math (str, 'fp32'): returns a model in given precision ('fp32' or 'fp16') """ from waveglow import model as waveglow fp16 = "model_math" in kwargs and kwargs["model_math"] == "fp16" force_reload = "force_reload" in kwargs and kwargs["force_reload"] if pretrained: if fp16: checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/waveglow_ckpt_amp/versions/19.09.0/files/nvidia_waveglowpyt_fp16_20190427' else: checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/waveglow_ckpt_fp32/versions/19.09.0/files/nvidia_waveglowpyt_fp32_20190427' ckpt_file = _download_checkpoint(checkpoint, force_reload) ckpt = torch.load(ckpt_file) state_dict = ckpt['state_dict'] if checkpoint_from_distributed(state_dict): state_dict = unwrap_distributed(state_dict) config = ckpt['config'] else: config = {'n_mel_channels': 80, 'n_flows': 12, 'n_group': 8, 'n_early_every': 4, 'n_early_size': 2, 'WN_config': {'n_layers': 8, 'kernel_size': 3, 'n_channels': 512}} for k,v in kwargs.items(): if k in config.keys(): config[k] = v elif k in config['WN_config'].keys(): config['WN_config'][k] = v m = waveglow.WaveGlow(**config) if pretrained: m.load_state_dict(state_dict) return m
PyTorch/SpeechRecognition/Jasper/triton
triton
jasper-client
#!/usr/bin/python # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import argparse import numpy as np import os from speech_utils import AudioSegment, SpeechClient import soundfile import pyaudio as pa import threading import math import time import glob FLAGS = None # read audio chunk from a file def get_audio_chunk_from_soundfile(sf, chunk_size, int_values): dtype = 'int32' if int_values else 'float32' audio_signal = sf.read(chunk_size, dtype=dtype) end = False # pad to chunk size if len(audio_signal) < chunk_size: end = True audio_signal = np.pad(audio_signal, (0, chunk_size-len( audio_signal)), mode='constant') return audio_signal, end # generator that returns chunks of audio data from file def audio_generator_from_file(input_filename, target_sr, int_values, chunk_duration): sf = soundfile.SoundFile(input_filename, 'rb') chunk_size = int(chunk_duration*sf.samplerate) start = True end = False while not end: audio_signal, end = get_audio_chunk_from_soundfile( sf, chunk_size, int_values) audio_segment = AudioSegment(audio_signal, sf.samplerate, target_sr) yield audio_segment.samples, target_sr, start, end start = False sf.close() # generator that returns chunks of audio data from file class AudioGeneratorFromMicrophone: def __init__(self,input_device_id, target_sr, chunk_duration): self.recording_state = "init" self.target_sr = target_sr self.chunk_duration = chunk_duration self.p = pa.PyAudio() device_info = self.p.get_host_api_info_by_index(0) num_devices = device_info.get('deviceCount') devices = {} for i in range(0, num_devices): if (self.p.get_device_info_by_host_api_device_index(0, i).get( 'maxInputChannels')) > 0: devices[i] = self.p.get_device_info_by_host_api_device_index( 0, i) if (len(devices) == 0): raise RuntimeError("Cannot find any valid input devices") if input_device_id is None or input_device_id not in \ devices.keys(): print("\nInput Devices:") for id, info in devices.items(): print("{}: {}".format(id,info.get("name"))) input_device_id = int(input("Enter device id to use: ")) self.input_device_id = input_device_id def generate_audio(self): chunk_size = int(self.chunk_duration*self.target_sr) self. recording_state = "init" def keyboard_listener(): input("Press Enter to start and end recording...") self.recording_state = "capture" print("Recording...") input("") self.recording_state = "release" listener = threading.Thread(target=keyboard_listener) listener.start() start = True end = False stream_initialized = False step = 0 while self.recording_state != "release": try: if self.recording_state == "capture": if not stream_initialized: stream = self.p.open( format=pa.paInt16, channels=1, rate=self.target_sr, input=True, input_device_index=self.input_device_id, frames_per_buffer=chunk_size) stream_initialized = True # Read audio chunk from microphone audio_signal = stream.read(chunk_size) audio_signal = np.frombuffer(audio_signal,dtype=np.int16) audio_segment = AudioSegment(audio_signal, self.target_sr, self.target_sr) yield audio_segment.samples, self.target_sr, start, end start = False step += 1 except Exception as e: print(e) break stream.close() self.p.terminate() def generate_audio_signal(self): #chunk_size = int(self.chunk_duration*self.target_sr) chunk_size = int(0.2*self.target_sr) self. recording_state = "init" def keyboard_listener(): input("Press Enter to start and end recording...") self.recording_state = "capture" print("Recording...") input("") self.recording_state = "release" listener = threading.Thread(target=keyboard_listener) listener.start() audio_samples = [] stream_initialized = False step = 0 while self.recording_state != "release": try: if self.recording_state == "capture": if not stream_initialized: stream = self.p.open( format=pa.paInt16, channels=1, rate=self.target_sr, input=True, input_device_index=self.input_device_id, frames_per_buffer=chunk_size) stream_initialized = True # Read audio chunk from microphone audio_signal = stream.read(chunk_size) audio_signal = np.frombuffer(audio_signal,dtype=np.int16) audio_segment = AudioSegment(audio_signal, self.target_sr, self.target_sr) if step == 0: audio_samples = audio_segment.samples else: audio_samples = np.concatenate((audio_samples, audio_segment.samples)) start = False step += 1 except Exception as e: print(e) break stream.close() self.p.terminate() return audio_samples # generator that returns chunks of audio features from file def audio_features_generator(input_filename, speech_features_params, target_sr, int_values, chunk_duration): sf = soundfile.SoundFile(input_filename, 'rb') chunk_size = int(chunk_duration*sf.samplerate) start = True end = False while not end: audio_signal, end = get_audio_chunk_from_soundfile(sf, chunk_size, int_values) audio_segment = AudioSegment(audio_signal, sf.samplerate, target_sr) audio_features, features_length = get_speech_features( audio_segment.samples, target_sr, speech_features_params) yield audio_features, start, end start = False sf.close() def audio_features_generator_with_buffer(input_filename, speech_features_params, target_sr, int_values, chunk_duration): sf = soundfile.SoundFile(input_filename, 'rb') chunk_size = int(chunk_duration*sf.samplerate) start = True end = False audio_signal = np.zeros(shape=3*chunk_size, dtype=np.float32) while not end: audio_signal[-chunk_size:], end = get_audio_chunk_from_soundfile(sf, chunk_size, int_values) audio_segment = AudioSegment(audio_signal, sf.samplerate, target_sr) audio_features, features_length = get_speech_features( audio_segment.samples, target_sr, speech_features_params) yield audio_features, start, end start = False audio_signal[:-chunk_size] = audio_signal[chunk_size:] sf.close() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False, help='Enable verbose output') parser.add_argument('--fixed_size', type=int, required=False, default=0, help="send fixed_size requests, pad or truncate") parser.add_argument('--batch_size', type=int, required=False, default=1, help='batch size') parser.add_argument('--model_platform', required=False, default='trt', help='Jasper model platform') parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8000', help='Inference server URL. Default is ' 'localhost:8000.') parser.add_argument('-i', '--protocol', type=str, required=False, default='HTTP', help='Protocol (HTTP/gRPC) used to communicate with ' 'inference service. Default is HTTP.') parser.add_argument('--audio_filename', type=str, required=False, default=None, help='Input audio filename') parser.add_argument('--data_dir', type=str, required=False, default=None, help='data directory') parser.add_argument('--manifest_filename', type=str, required=False, default=None, help='relative manifest paths to --data_dir directory.') FLAGS = parser.parse_args() protocol = FLAGS.protocol.lower() valid_model_platforms = {"ts-trace","onnx", "tensorrt"} if FLAGS.model_platform not in valid_model_platforms: raise ValueError("Invalid model_platform {}. Valid choices are {" "}".format(FLAGS.model_platform, valid_model_platforms)) model_name = "jasper-" + FLAGS.model_platform + "-ensemble" speech_client = SpeechClient( FLAGS.url, protocol, model_name, 1, FLAGS.batch_size, model_platform=FLAGS.model_platform, verbose=FLAGS.verbose, mode="synchronous", from_features=False ) filenames = [] transcripts = [] if FLAGS.audio_filename is not None: audio_file = os.path.join(FLAGS.data_dir, FLAGS.audio_filename) if os.path.isdir(audio_file): filenames = glob.glob(os.path.join(os.path.abspath(audio_file), "**", "*.wav"), recursive=True) else: filenames = [audio_file] elif FLAGS.manifest_filename is not None: filter_speed=1.0 data_dir=FLAGS.data_dir labels = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'", "<BLANK>"] labels_map = dict([(labels[i], i) for i in range(len(labels))]) blank_index = len(labels)-1 table = None import string punctuation = string.punctuation punctuation = punctuation.replace("+", "") punctuation = punctuation.replace("&", "") table = str.maketrans(punctuation, " " * len(punctuation)) import json if "./triton" not in sys.path: sys.path.append("./") sys.path.append("./triton") from speech_utils import normalize_string, parse_transcript FLAGS.manifest_filename = FLAGS.manifest_filename.split(',') for manifest in FLAGS.manifest_filename: manifest=os.path.join(data_dir, manifest) print(manifest) with open(manifest, "r", encoding="utf-8") as fh: a=json.load(fh) for data in a: files_and_speeds = data['files'] audio_path = [x['fname'] for x in files_and_speeds if x['speed'] == filter_speed][0] filenames.append(os.path.join(data_dir, audio_path)) transcript_text = data['transcript'] transcript_text = normalize_string(transcript_text, labels=labels, table=table) transcripts.append(transcript_text) #parse_transcript(transcript_text, labels_map, blank_index)) # convert to vocab indices # Read the audio files # Group requests in batches audio_idx = 0 last_request = False predictions = [] while not last_request: batch_audio_samples = [] batch_filenames = [] for idx in range(FLAGS.batch_size): filename = filenames[audio_idx] print("Reading audio file: ", filename) audio = AudioSegment.from_file( filename, offset=0, duration=FLAGS.fixed_size).samples if FLAGS.fixed_size: audio = np.resize(audio, FLAGS.fixed_size) audio_idx = (audio_idx + 1) % len(filenames) if audio_idx == 0: last_request = True batch_audio_samples.append(audio) batch_filenames.append(filename) predictions += speech_client.recognize( batch_audio_samples, batch_filenames) if transcripts: predictions = [x for l in predictions for x in l ] from metrics import word_error_rate wer, scores, num_words = word_error_rate(predictions, transcripts) print(wer)
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/samplers
samplers
iteration_based_batch_sampler
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from torch.utils.data.sampler import BatchSampler class IterationBasedBatchSampler(BatchSampler): """ Wraps a BatchSampler, resampling from it until a specified number of iterations have been sampled """ def __init__(self, batch_sampler, num_iterations, start_iter=0): self.batch_sampler = batch_sampler self.num_iterations = num_iterations self.start_iter = start_iter def __iter__(self): iteration = self.start_iter while iteration <= self.num_iterations: # if the underlying sampler has a set_epoch method, like # DistributedSampler, used for making each process see # a different split of the dataset, then set it if hasattr(self.batch_sampler.sampler, "set_epoch"): self.batch_sampler.sampler.set_epoch(iteration) for batch in self.batch_sampler: iteration += 1 if iteration > self.num_iterations: break yield batch def __len__(self): return self.num_iterations
PyTorch/LanguageModeling/BERT/triton/dist6l/runner
runner
start_NVIDIA-DGX-1-(1x-V100-32GB)
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/bin/bash # Install Docker . /etc/os-release && \ curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \ echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \ curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey| apt-key add - && \ curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \ apt-get update && \ apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2 # Install packages pip install -r triton/runner/requirements.txt # Evaluate Runner python3 -m "triton.dist6l.runner.__main__" \ --config-path "triton/dist6l/runner/config_NVIDIA-DGX-1-(1x-V100-32GB).yaml" \ --device 0
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ModulationRemovalPlugin
taco2ModulationRemovalPlugin
taco2ModulationRemovalLayerPluginCreator
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "taco2ModulationRemovalLayerPluginCreator.h" #include "taco2ModulationRemovalLayerPlugin.h" #include <stdexcept> #include <vector> using namespace nvinfer1; namespace nvinfer1 { namespace plugin { /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const char* const INPUTLENGTH_STR = "InputLength"; constexpr const char* const FILTERLENGTH_STR = "FilterLength"; constexpr const char* const HOPLENGTH_STR = "HopLength"; constexpr const char* const WEIGHTS_STR = "Weights"; } // namespace /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ PluginFieldCollection* Taco2ModulationRemovalLayerPluginCreator::getFields() { static PluginFieldCollection* pluginPtr = nullptr; static const std::vector<PluginField> fields{ {INPUTLENGTH_STR, nullptr, PluginFieldType::kINT32, 0}, {FILTERLENGTH_STR, nullptr, PluginFieldType::kINT32, 0}, {HOPLENGTH_STR, nullptr, PluginFieldType::kINT32, 0}, {WEIGHTS_STR, nullptr, PluginFieldType::kFLOAT32, 0}, }; if (!pluginPtr) { pluginPtr = static_cast<PluginFieldCollection*>(malloc(sizeof(*pluginPtr) + fields.size() * sizeof(PluginField))); pluginPtr->nbFields = static_cast<int>(fields.size()); pluginPtr->fields = fields.data(); } return pluginPtr; } /****************************************************************************** * CONSTRUCTORS / DESTRUCTOR ************************************************** *****************************************************************************/ Taco2ModulationRemovalLayerPluginCreator::Taco2ModulationRemovalLayerPluginCreator() : mNamespace() { // do nothing } /****************************************************************************** * PUBLIC METHODS ************************************************************* *****************************************************************************/ const char* Taco2ModulationRemovalLayerPluginCreator::getPluginName() const { return Taco2ModulationRemovalLayerPlugin::getName(); } const char* Taco2ModulationRemovalLayerPluginCreator::getPluginVersion() const { return Taco2ModulationRemovalLayerPlugin::getVersion(); } const PluginFieldCollection* Taco2ModulationRemovalLayerPluginCreator::getFieldNames() { return getFields(); } IPluginV2* Taco2ModulationRemovalLayerPluginCreator::createPlugin( const char* const /*name*/, const PluginFieldCollection* fc) { int inputLength = 0; int filterLength = 0; int hopLength = 0; Weights weights{DataType::kFLOAT, nullptr, 0}; for (int i = 0; i < fc->nbFields; ++i) { const std::string name(fc->fields[i].name); if (name == INPUTLENGTH_STR) { inputLength = static_cast<const int32_t*>(fc->fields[i].data)[0]; } else if (name == FILTERLENGTH_STR) { filterLength = static_cast<const int32_t*>(fc->fields[i].data)[0]; } else if (name == HOPLENGTH_STR) { hopLength = static_cast<const int32_t*>(fc->fields[i].data)[0]; } else if (name == WEIGHTS_STR) { weights.values = fc->fields[i].data; weights.count = fc->fields[i].length; } else { throw std::runtime_error("Unknown plugin field: '" + name + "'"); } } return new Taco2ModulationRemovalLayerPlugin(weights, inputLength, filterLength, hopLength); } IPluginV2* Taco2ModulationRemovalLayerPluginCreator::deserializePlugin( const char* const /* layerName */, const void* const serialData, size_t const serialLength) { return new Taco2ModulationRemovalLayerPlugin( Taco2ModulationRemovalLayerPlugin::deserialize(serialData, serialLength)); } void Taco2ModulationRemovalLayerPluginCreator::setPluginNamespace(const char* pluginNamespace) { mNamespace = pluginNamespace; } const char* Taco2ModulationRemovalLayerPluginCreator::getPluginNamespace() const { return mNamespace.c_str(); } } // namespace plugin } // namespace nvinfer1
PyTorch/Classification/ConvNets/resnext101-32x4d/training/TF32
TF32
DGXA100_resnext101-32x4d_TF32_250E
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnext101-32x4d --precision TF32 --mode convergence --platform DGXA100 /imagenet --workspace ${1:-./} --raport-file raport.json
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data
data
dictionary
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from collections import Counter from multiprocessing import Pool import torch from common.fairseq import utils from common.fairseq.data import data_utils from common.fairseq.file_chunker_utils import Chunker, find_offsets from common.fairseq.file_io import PathManager from common.fairseq.tokenizer import tokenize_line class Dictionary: """A mapping from symbols to consecutive integers""" def __init__( self, *, # begin keyword-only arguments bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None, ): self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos self.symbols = [] self.count = [] self.indices = {} self.bos_index = self.add_symbol(bos) self.pad_index = self.add_symbol(pad) self.eos_index = self.add_symbol(eos) self.unk_index = self.add_symbol(unk) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(s) self.nspecial = len(self.symbols) def __eq__(self, other): return self.indices == other.indices def __getitem__(self, idx): if idx < len(self.symbols): return self.symbols[idx] return self.unk_word def get_count(self, idx): return self.count[idx] def __len__(self): """Returns the number of symbols in the dictionary""" return len(self.symbols) def __contains__(self, sym): return sym in self.indices def index(self, sym): """Returns the index of the specified symbol""" assert isinstance(sym, str) if sym in self.indices: return self.indices[sym] return self.unk_index def string( self, tensor, bpe_symbol=None, escape_unk=False, extra_symbols_to_ignore=None, unk_string=None, include_eos=False, separator=" ", ): """Helper for converting a tensor of token indices to a string. Can optionally remove BPE symbols or escape <unk> words. """ if torch.is_tensor(tensor) and tensor.dim() == 2: return "\n".join( self.string( t, bpe_symbol, escape_unk, extra_symbols_to_ignore, include_eos=include_eos, ) for t in tensor ) extra_symbols_to_ignore = set(extra_symbols_to_ignore or []) extra_symbols_to_ignore.add(self.eos()) def token_string(i): if i == self.unk(): if unk_string is not None: return unk_string else: return self.unk_string(escape_unk) else: return self[i] if hasattr(self, "bos_index"): extra_symbols_to_ignore.add(self.bos()) sent = separator.join( token_string(i) for i in tensor if utils.item(i) not in extra_symbols_to_ignore ) return data_utils.post_process(sent, bpe_symbol) def unk_string(self, escape=False): """Return unknown string, optionally escaped as: <<unk>>""" if escape: return "<{}>".format(self.unk_word) else: return self.unk_word def add_symbol(self, word, n=1, overwrite=False): """Adds a word to the dictionary""" if word in self.indices and not overwrite: idx = self.indices[word] self.count[idx] = self.count[idx] + n return idx else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(n) return idx def update(self, new_dict): """Updates counts from new dictionary.""" for word in new_dict.symbols: idx2 = new_dict.indices[word] if word in self.indices: idx = self.indices[word] self.count[idx] = self.count[idx] + new_dict.count[idx2] else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(new_dict.count[idx2]) def finalize(self, threshold=-1, nwords=-1, padding_factor=8): """Sort symbols by frequency in descending order, ignoring special ones. Args: - threshold defines the minimum word count - nwords defines the total number of words in the final dictionary, including special symbols - padding_factor can be used to pad the dictionary size to be a multiple of 8, which is important on some hardware (e.g., Nvidia Tensor Cores). """ if nwords <= 0: nwords = len(self) new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial))) new_symbols = self.symbols[: self.nspecial] new_count = self.count[: self.nspecial] c = Counter( dict( sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :])) ) ) for symbol, count in c.most_common(nwords - self.nspecial): if count >= threshold: new_indices[symbol] = len(new_symbols) new_symbols.append(symbol) new_count.append(count) else: break assert len(new_symbols) == len(new_indices) self.count = list(new_count) self.symbols = list(new_symbols) self.indices = new_indices self.pad_to_multiple_(padding_factor) def pad_to_multiple_(self, padding_factor): """Pad Dictionary size to be a multiple of *padding_factor*.""" if padding_factor > 1: i = 0 while len(self) % padding_factor != 0: symbol = "madeupword{:04d}".format(i) self.add_symbol(symbol, n=0) i += 1 def bos(self): """Helper to get index of beginning-of-sentence symbol""" return self.bos_index def pad(self): """Helper to get index of pad symbol""" return self.pad_index def eos(self): """Helper to get index of end-of-sentence symbol""" return self.eos_index def unk(self): """Helper to get index of unk symbol""" return self.unk_index @classmethod def load(cls, f): """Loads the dictionary from a text file with the format: ``` <symbol0> <count0> <symbol1> <count1> ... ``` """ d = cls() d.add_from_file(f) return d def add_from_file(self, f): """ Loads a pre-existing dictionary from a text file and adds its symbols to this instance. """ if isinstance(f, str): try: with open(PathManager.get_local_path(f), "r", encoding="utf-8") as fd: self.add_from_file(fd) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception( "Incorrect encoding detected in {}, please " "rebuild the dataset".format(f) ) return lines = f.readlines() indices_start_line = self._load_meta(lines) for line in lines[indices_start_line:]: try: line, field = line.rstrip().rsplit(" ", 1) if field == "#fairseq:overwrite": overwrite = True line, field = line.rsplit(" ", 1) else: overwrite = False count = int(field) word = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. If using the Camembert model, please " "download an updated copy of the model file.".format(word) ) self.add_symbol(word, n=count, overwrite=overwrite) except ValueError: raise ValueError( "Incorrect dictionary format, expected '<token> <cnt> [flags]'" ) def _save(self, f, kv_iterator): if isinstance(f, str): PathManager.mkdirs(os.path.dirname(f)) with PathManager.open(f, "w", encoding="utf-8") as fd: return self.save(fd) for k, v in kv_iterator: print("{} {}".format(k, v), file=f) def _get_meta(self): return [], [] def _load_meta(self, lines): return 0 def save(self, f): """Stores dictionary into a text file""" ex_keys, ex_vals = self._get_meta() self._save( f, zip( ex_keys + self.symbols[self.nspecial :], ex_vals + self.count[self.nspecial :], ), ) def dummy_sentence(self, length): t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long() t[-1] = self.eos() return t def encode_line( self, line, line_tokenizer=tokenize_line, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False, ) -> torch.IntTensor: words = line_tokenizer(line) if reverse_order: words = list(reversed(words)) nwords = len(words) ids = torch.IntTensor(nwords + 1 if append_eos else nwords) for i, word in enumerate(words): if add_if_not_exist: idx = self.add_symbol(word) else: idx = self.index(word) if consumer is not None: consumer(word, idx) ids[i] = idx if append_eos: ids[nwords] = self.eos_index return ids @staticmethod def _add_file_to_dictionary_single_worker( filename, tokenize, eos_word, start_offset, end_offset, ): counter = Counter() with Chunker(filename, start_offset, end_offset) as line_iterator: for line in line_iterator: for word in tokenize(line): counter.update([word]) counter.update([eos_word]) return counter @staticmethod def add_file_to_dictionary(filename, dict, tokenize, num_workers): def merge_result(counter): for w, c in sorted(counter.items()): dict.add_symbol(w, c) local_file = PathManager.get_local_path(filename) offsets = find_offsets(local_file, num_workers) if num_workers > 1: chunks = zip(offsets, offsets[1:]) pool = Pool(processes=num_workers) results = [] for (start_offset, end_offset) in chunks: results.append( pool.apply_async( Dictionary._add_file_to_dictionary_single_worker, ( local_file, tokenize, dict.eos_word, start_offset, end_offset, ), ) ) pool.close() pool.join() for r in results: merge_result(r.get()) else: merge_result( Dictionary._add_file_to_dictionary_single_worker( local_file, tokenize, dict.eos_word, offsets[0], offsets[1] ) )
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/data
data
data_utils_fast
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from numba import jit @jit(nopython=True) def batch_by_size_vec(indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult): """A numba version of cython batch_by_size_vec from data_utils_fast.pyx""" indices_len = indices.shape[0] batches_ends = np.zeros(indices_len, dtype=np.int32) batches_ends_view = batches_ends num_tokens_view = num_tokens_vec pos = 0 new_batch_end = 0 new_batch_max_tokens = 0 new_batch_sentences = 0 new_batch_num_tokens = 0 overflow = False size_matches_with_bsz_mult = False batches_count = 0 batch_start = 0 tail_max_tokens = 0 batch_max_tokens = 0 for pos in range(indices_len): # At every pos we keep stats about the last complete batch [batch_start:batch_end), # and tail [batch_end:pos]. # 1) Every time when (batch + tail) forms a valid batch # (according to max_tokens, max_sentences and bsz_mult) we append tail to batch. # 2) When (batch+tail) violates max_tokens or max_sentences constraints # we finalize running batch, and tail becomes a new batch. # 3) There is a corner case when tail also violates constraints. # In that situation [batch_end:pos-1] (tail without the current pos) # gets added to the finalized batches, while [pos:pos] becomes a new tail. # # Important: For the sake of performance try to avoid using function calls within this loop. tail_max_tokens = tail_max_tokens \ if tail_max_tokens > num_tokens_view[pos] \ else num_tokens_view[pos] new_batch_end = pos + 1 new_batch_max_tokens = batch_max_tokens \ if batch_max_tokens > tail_max_tokens \ else tail_max_tokens new_batch_sentences = new_batch_end - batch_start new_batch_num_tokens = new_batch_sentences * new_batch_max_tokens overflow = (new_batch_sentences > max_sentences > 0 or new_batch_num_tokens > max_tokens > 0) size_matches_with_bsz_mult = (new_batch_sentences < bsz_mult or new_batch_sentences % bsz_mult == 0) if overflow: tail_num_tokens = tail_max_tokens * \ (new_batch_end - batches_ends_view[batches_count]) tail_overflow = tail_num_tokens > max_tokens > 0 # In case of a tail overflow finalize two batches if tail_overflow: batches_count += 1 batches_ends_view[batches_count] = pos tail_max_tokens = num_tokens_view[pos] batch_start = batches_ends_view[batches_count] batches_count += 1 new_batch_max_tokens = tail_max_tokens if overflow or size_matches_with_bsz_mult: batches_ends_view[batches_count] = new_batch_end batch_max_tokens = new_batch_max_tokens tail_max_tokens = 0 if batches_ends_view[batches_count] != indices_len: batches_count += 1 # Memory and time-efficient split return np.split(indices, batches_ends[:batches_count])
TensorFlow/Classification/ConvNets/utils
utils
data_utils
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import tensorflow as tf from utils import image_processing from utils import dali_utils from utils import hvd_wrapper as hvd __all__ = ["get_synth_input_fn", "normalized_inputs"] _R_MEAN = 123.68 _G_MEAN = 116.28 _B_MEAN = 103.53 _CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN] _CHANNEL_STDS = [58.395, 57.120, 57.385] _NUM_CHANNELS = 3 def get_synth_input_fn(batch_size, height, width, num_channels, data_format, num_classes, dtype=tf.float32): """Returns an input function that returns a dataset with random data. This input_fn returns a data set that iterates over a set of random data and bypasses all preprocessing, e.g. jpeg decode and copy. The host to device copy is still included. This used to find the upper throughput bound when tunning the full input pipeline. Args: height: Integer height that will be used to create a fake image tensor. width: Integer width that will be used to create a fake image tensor. num_channels: Integer depth that will be used to create a fake image tensor. num_classes: Number of classes that should be represented in the fake labels tensor dtype: Data type for features/images. Returns: An input_fn that can be used in place of a real one to return a dataset that can be used for iteration. """ if data_format not in ["NHWC", "NCHW"]: raise ValueError("Unknown data_format: %s" % str(data_format)) if data_format == "NHWC": input_shape = [batch_size, height, width, num_channels] else: input_shape = [batch_size, num_channels, height, width] # Convert the inputs to a Dataset. inputs = tf.truncated_normal(input_shape, dtype=dtype, mean=127, stddev=60, name='synthetic_inputs') labels = tf.random_uniform([batch_size], minval=0, maxval=num_classes - 1, dtype=tf.int32, name='synthetic_labels') data = tf.data.Dataset.from_tensors((inputs, labels)) data = data.repeat() data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) return data def get_tfrecords_input_fn(filenames, batch_size, height, width, training, distort_color, num_threads, deterministic): shuffle_buffer_size = 4096 if deterministic: seed = 13 * hvd.rank() else: seed = None ds = tf.data.Dataset.from_tensor_slices(filenames) if hvd.size() > 1 and training: ds = ds.shard(hvd.size(), hvd.rank()) ds = ds.interleave(tf.data.TFRecordDataset, cycle_length=10, block_length=8) def preproc_func(record): return image_processing.preprocess_image_record(record, height, width, _NUM_CHANNELS, training) if training: ds = ds.shuffle(buffer_size=shuffle_buffer_size, seed=seed) ds = ds.repeat().map(preproc_func, num_parallel_calls=num_threads) ds = ds.batch(batch_size=batch_size, drop_remainder=True) ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) return ds def get_inference_input_fn(filenames, height, width, num_threads): ds = tf.data.Dataset.from_tensor_slices(filenames) counter = tf.data.Dataset.range(sys.maxsize) ds = tf.data.Dataset.zip((ds, counter)) def preproc_func(record, counter_): return image_processing.preprocess_image_file(record, height, width, _NUM_CHANNELS, is_training=False) ds = ds.apply( tf.data.experimental.map_and_batch(map_func=preproc_func, num_parallel_calls=num_threads, batch_size=1) ) ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) return ds def get_dali_input_fn( filenames, idx_filenames, batch_size, height, width, training, distort_color, num_threads, deterministic ): if idx_filenames is None: raise ValueError("Must provide idx_filenames for DALI's reader") preprocessor = dali_utils.DALIPreprocessor( filenames, idx_filenames, height, width, batch_size, num_threads, dali_cpu=False, deterministic=deterministic, training=training ) images, labels = preprocessor.get_device_minibatches() return (images, labels) def normalized_inputs(inputs): num_channels = inputs.get_shape()[-1] if inputs.get_shape().ndims != 4: raise ValueError('Input must be of size [batch_size, height, width, C>0]') if len(_CHANNEL_MEANS) != num_channels: raise ValueError('len(means) must match the number of channels') # We have a 1-D tensor of means; convert to 3-D. means_per_channel = tf.reshape(_CHANNEL_MEANS, [1, 1, num_channels]) means_per_channel = tf.cast(means_per_channel, dtype=inputs.dtype) stds_per_channel = tf.reshape(_CHANNEL_STDS, [1, 1, num_channels]) stds_per_channel = tf.cast(stds_per_channel, dtype=inputs.dtype) inputs = tf.subtract(inputs, means_per_channel) return tf.divide(inputs, stds_per_channel) def get_serving_input_receiver_fn(batch_size, height, width, num_channels, data_format, dtype=tf.float32): if data_format not in ["NHWC", "NCHW"]: raise ValueError("Unknown data_format: %s" % str(data_format)) if data_format == "NHWC": input_shape = [batch_size] + [height, width, num_channels] else: input_shape = [batch_size] + [num_channels, height, width] def serving_input_receiver_fn(): features = tf.placeholder(dtype=dtype, shape=input_shape, name='input_tensor') return tf.estimator.export.TensorServingInputReceiver(features=features, receiver_tensors=features) return serving_input_receiver_fn
PyTorch/SpeechSynthesis/FastPitch
FastPitch
models
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import re import sys import torch from common.text.symbols import get_symbols, get_pad_idx from common.utils import DefaultAttrDict, AttrDict from fastpitch.model import FastPitch from fastpitch.model_jit import FastPitchJIT from hifigan.models import Generator try: from waveglow.model import WaveGlow from waveglow import model as glow from waveglow.denoiser import Denoiser sys.modules['glow'] = glow except ImportError: print("WARNING: Couldn't import WaveGlow") def parse_model_args(model_name, parser, add_help=False): if model_name == 'FastPitch': from fastpitch import arg_parser return arg_parser.parse_fastpitch_args(parser, add_help) elif model_name == 'HiFi-GAN': from hifigan import arg_parser return arg_parser.parse_hifigan_args(parser, add_help) elif model_name == 'WaveGlow': from waveglow.arg_parser import parse_waveglow_args return parse_waveglow_args(parser, add_help) else: raise NotImplementedError(model_name) def get_model(model_name, model_config, device, bn_uniform_init=False, forward_is_infer=False, jitable=False): """Chooses a model based on name""" del bn_uniform_init # unused (old name: uniform_initialize_bn_weight) if model_name == 'FastPitch': if jitable: model = FastPitchJIT(**model_config) else: model = FastPitch(**model_config) elif model_name == 'HiFi-GAN': model = Generator(model_config) elif model_name == 'WaveGlow': model = WaveGlow(**model_config) else: raise NotImplementedError(model_name) if forward_is_infer and hasattr(model, 'infer'): model.forward = model.infer return model.to(device) def get_model_config(model_name, args, ckpt_config=None): """ Get config needed to instantiate the model """ # Mark keys missing in `args` with an object (None is ambiguous) _missing = object() args = DefaultAttrDict(lambda: _missing, vars(args)) # `ckpt_config` is loaded from the checkpoint and has the priority # `model_config` is based on args and fills empty slots in `ckpt_config` if model_name == 'FastPitch': model_config = dict( # io n_mel_channels=args.n_mel_channels, # symbols n_symbols=(len(get_symbols(args.symbol_set)) if args.symbol_set is not _missing else _missing), padding_idx=(get_pad_idx(args.symbol_set) if args.symbol_set is not _missing else _missing), symbols_embedding_dim=args.symbols_embedding_dim, # input FFT in_fft_n_layers=args.in_fft_n_layers, in_fft_n_heads=args.in_fft_n_heads, in_fft_d_head=args.in_fft_d_head, in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size, in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size, in_fft_output_size=args.in_fft_output_size, p_in_fft_dropout=args.p_in_fft_dropout, p_in_fft_dropatt=args.p_in_fft_dropatt, p_in_fft_dropemb=args.p_in_fft_dropemb, # output FFT out_fft_n_layers=args.out_fft_n_layers, out_fft_n_heads=args.out_fft_n_heads, out_fft_d_head=args.out_fft_d_head, out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size, out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size, out_fft_output_size=args.out_fft_output_size, p_out_fft_dropout=args.p_out_fft_dropout, p_out_fft_dropatt=args.p_out_fft_dropatt, p_out_fft_dropemb=args.p_out_fft_dropemb, # duration predictor dur_predictor_kernel_size=args.dur_predictor_kernel_size, dur_predictor_filter_size=args.dur_predictor_filter_size, p_dur_predictor_dropout=args.p_dur_predictor_dropout, dur_predictor_n_layers=args.dur_predictor_n_layers, # pitch predictor pitch_predictor_kernel_size=args.pitch_predictor_kernel_size, pitch_predictor_filter_size=args.pitch_predictor_filter_size, p_pitch_predictor_dropout=args.p_pitch_predictor_dropout, pitch_predictor_n_layers=args.pitch_predictor_n_layers, # pitch conditioning pitch_embedding_kernel_size=args.pitch_embedding_kernel_size, # speakers parameters n_speakers=args.n_speakers, speaker_emb_weight=args.speaker_emb_weight, # energy predictor energy_predictor_kernel_size=args.energy_predictor_kernel_size, energy_predictor_filter_size=args.energy_predictor_filter_size, p_energy_predictor_dropout=args.p_energy_predictor_dropout, energy_predictor_n_layers=args.energy_predictor_n_layers, # energy conditioning energy_conditioning=args.energy_conditioning, energy_embedding_kernel_size=args.energy_embedding_kernel_size, ) elif model_name == 'HiFi-GAN': if args.hifigan_config is not None: assert ckpt_config is None, ( "Supplied --hifigan-config, but the checkpoint has a config. " "Drop the flag or remove the config from the checkpoint file.") print(f'HiFi-GAN: Reading model config from {args.hifigan_config}') with open(args.hifigan_config) as f: args = AttrDict(json.load(f)) model_config = dict( # generator architecture upsample_rates=args.upsample_rates, upsample_kernel_sizes=args.upsample_kernel_sizes, upsample_initial_channel=args.upsample_initial_channel, resblock=args.resblock, resblock_kernel_sizes=args.resblock_kernel_sizes, resblock_dilation_sizes=args.resblock_dilation_sizes, ) elif model_name == 'WaveGlow': model_config = dict( n_mel_channels=args.n_mel_channels, n_flows=args.flows, n_group=args.groups, n_early_every=args.early_every, n_early_size=args.early_size, WN_config=dict( n_layers=args.wn_layers, kernel_size=args.wn_kernel_size, n_channels=args.wn_channels ) ) else: raise NotImplementedError(model_name) # Start with ckpt_config, and fill missing keys from model_config final_config = {} if ckpt_config is None else ckpt_config.copy() missing_keys = set(model_config.keys()) - set(final_config.keys()) final_config.update({k: model_config[k] for k in missing_keys}) # If there was a ckpt_config, it should have had all args if ckpt_config is not None and len(missing_keys) > 0: print(f'WARNING: Keys {missing_keys} missing from the loaded config; ' 'using args instead.') assert all(v is not _missing for v in final_config.values()) return final_config def get_model_train_setup(model_name, args): """ Dump train setup for documentation purposes """ if model_name == 'FastPitch': return dict() elif model_name == 'HiFi-GAN': return dict( # audio segment_size=args.segment_size, filter_length=args.filter_length, num_mels=args.num_mels, hop_length=args.hop_length, win_length=args.win_length, sampling_rate=args.sampling_rate, mel_fmin=args.mel_fmin, mel_fmax=args.mel_fmax, mel_fmax_loss=args.mel_fmax_loss, max_wav_value=args.max_wav_value, # other seed=args.seed, # optimization base_lr=args.learning_rate, lr_decay=args.lr_decay, epochs_all=args.epochs, ) elif model_name == 'WaveGlow': return dict() else: raise NotImplementedError(model_name) def load_model_from_ckpt(checkpoint_data, model, key='state_dict'): if key is None: return checkpoint_data['model'], None sd = checkpoint_data[key] sd = {re.sub('^module\.', '', k): v for k, v in sd.items()} status = model.load_state_dict(sd, strict=False) return model, status def load_and_setup_model(model_name, parser, checkpoint, amp, device, unk_args=[], forward_is_infer=False, jitable=False): if checkpoint is not None: ckpt_data = torch.load(checkpoint) print(f'{model_name}: Loading {checkpoint}...') ckpt_config = ckpt_data.get('config') if ckpt_config is None: print(f'{model_name}: No model config in the checkpoint; using args.') else: print(f'{model_name}: Found model config saved in the checkpoint.') else: ckpt_config = None ckpt_data = {} model_parser = parse_model_args(model_name, parser, add_help=False) model_args, model_unk_args = model_parser.parse_known_args() unk_args[:] = list(set(unk_args) & set(model_unk_args)) model_config = get_model_config(model_name, model_args, ckpt_config) model = get_model(model_name, model_config, device, forward_is_infer=forward_is_infer, jitable=jitable) if checkpoint is not None: key = 'generator' if model_name == 'HiFi-GAN' else 'state_dict' model, status = load_model_from_ckpt(ckpt_data, model, key) missing = [] if status is None else status.missing_keys unexpected = [] if status is None else status.unexpected_keys # Attention is only used during training, we won't miss it if model_name == 'FastPitch': missing = [k for k in missing if not k.startswith('attention.')] unexpected = [k for k in unexpected if not k.startswith('attention.')] assert len(missing) == 0 and len(unexpected) == 0, ( f'Mismatched keys when loading parameters. Missing: {missing}, ' f'unexpected: {unexpected}.') if model_name == "WaveGlow": for k, m in model.named_modules(): m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatability model = model.remove_weightnorm(model) elif model_name == 'HiFi-GAN': assert model_args.hifigan_config is not None or ckpt_config is not None, ( 'Use a HiFi-GAN checkpoint from NVIDIA DeepLearningExamples with ' 'saved config or supply --hifigan-config <json_file>.') model.remove_weight_norm() if amp: model.half() model.eval() return model.to(device), model_config, ckpt_data.get('train_setup', {}) def load_and_setup_ts_model(model_name, checkpoint, amp, device=None): print(f'{model_name}: Loading TorchScript checkpoint {checkpoint}...') model = torch.jit.load(checkpoint).eval() if device is not None: model = model.to(device) if amp: model.half() elif next(model.parameters()).dtype == torch.float16: raise ValueError('Trying to load FP32 model,' 'TS checkpoint is in FP16 precision.') return model def convert_ts_to_trt(model_name, ts_model, parser, amp, unk_args=[]): trt_parser = _parse_trt_compilation_args(model_name, parser, add_help=False) trt_args, trt_unk_args = trt_parser.parse_known_args() unk_args[:] = list(set(unk_args) & set(trt_unk_args)) if model_name == 'HiFi-GAN': return _convert_ts_to_trt_hifigan( ts_model, amp, trt_args.trt_min_opt_max_batch, trt_args.trt_min_opt_max_hifigan_length) else: raise NotImplementedError def _parse_trt_compilation_args(model_name, parent, add_help=False): """ Parse model and inference specific commandline arguments. """ parser = argparse.ArgumentParser(parents=[parent], add_help=add_help, allow_abbrev=False) trt = parser.add_argument_group(f'{model_name} Torch-TensorRT compilation parameters') trt.add_argument('--trt-min-opt-max-batch', nargs=3, type=int, default=(1, 8, 16), help='Torch-TensorRT min, optimal and max batch size') if model_name == 'HiFi-GAN': trt.add_argument('--trt-min-opt-max-hifigan-length', nargs=3, type=int, default=(100, 800, 1200), help='Torch-TensorRT min, optimal and max audio length (in frames)') return parser def _convert_ts_to_trt_hifigan(ts_model, amp, trt_min_opt_max_batch, trt_min_opt_max_hifigan_length, num_mels=80): import torch_tensorrt trt_dtype = torch.half if amp else torch.float print(f'Torch TensorRT: compiling HiFi-GAN for dtype {trt_dtype}.') min_shp, opt_shp, max_shp = zip(trt_min_opt_max_batch, (num_mels,) * 3, trt_min_opt_max_hifigan_length) compile_settings = { "inputs": [torch_tensorrt.Input( min_shape=min_shp, opt_shape=opt_shp, max_shape=max_shp, dtype=trt_dtype, )], "enabled_precisions": {trt_dtype}, "require_full_compilation": True, } trt_model = torch_tensorrt.compile(ts_model, **compile_settings) print('Torch TensorRT: compilation successful.') return trt_model
TensorFlow/Detection/SSD/models/research/object_detection/anchor_generators
anchor_generators
grid_anchor_generator
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Generates grid anchors on the fly as used in Faster RCNN. Generates grid anchors on the fly as described in: "Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks" Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. """ import tensorflow as tf from object_detection.core import anchor_generator from object_detection.core import box_list from object_detection.utils import ops class GridAnchorGenerator(anchor_generator.AnchorGenerator): """Generates a grid of anchors at given scales and aspect ratios.""" def __init__(self, scales=(0.5, 1.0, 2.0), aspect_ratios=(0.5, 1.0, 2.0), base_anchor_size=None, anchor_stride=None, anchor_offset=None): """Constructs a GridAnchorGenerator. Args: scales: a list of (float) scales, default=(0.5, 1.0, 2.0) aspect_ratios: a list of (float) aspect ratios, default=(0.5, 1.0, 2.0) base_anchor_size: base anchor size as height, width ( (length-2 float32 list or tensor, default=[256, 256]) anchor_stride: difference in centers between base anchors for adjacent grid positions (length-2 float32 list or tensor, default=[16, 16]) anchor_offset: center of the anchor with scale and aspect ratio 1 for the upper left element of the grid, this should be zero for feature networks with only VALID padding and even receptive field size, but may need additional calculation if other padding is used (length-2 float32 list or tensor, default=[0, 0]) """ # Handle argument defaults if base_anchor_size is None: base_anchor_size = [256, 256] base_anchor_size = tf.to_float(tf.convert_to_tensor(base_anchor_size)) if anchor_stride is None: anchor_stride = [16, 16] anchor_stride = tf.to_float(tf.convert_to_tensor(anchor_stride)) if anchor_offset is None: anchor_offset = [0, 0] anchor_offset = tf.to_float(tf.convert_to_tensor(anchor_offset)) self._scales = scales self._aspect_ratios = aspect_ratios self._base_anchor_size = base_anchor_size self._anchor_stride = anchor_stride self._anchor_offset = anchor_offset def name_scope(self): return 'GridAnchorGenerator' def num_anchors_per_location(self): """Returns the number of anchors per spatial location. Returns: a list of integers, one for each expected feature map to be passed to the `generate` function. """ return [len(self._scales) * len(self._aspect_ratios)] def _generate(self, feature_map_shape_list): """Generates a collection of bounding boxes to be used as anchors. Args: feature_map_shape_list: list of pairs of convnet layer resolutions in the format [(height_0, width_0)]. For example, setting feature_map_shape_list=[(8, 8)] asks for anchors that correspond to an 8x8 layer. For this anchor generator, only lists of length 1 are allowed. Returns: boxes_list: a list of BoxLists each holding anchor boxes corresponding to the input feature map shapes. Raises: ValueError: if feature_map_shape_list, box_specs_list do not have the same length. ValueError: if feature_map_shape_list does not consist of pairs of integers """ if not (isinstance(feature_map_shape_list, list) and len(feature_map_shape_list) == 1): raise ValueError('feature_map_shape_list must be a list of length 1.') if not all([isinstance(list_item, tuple) and len(list_item) == 2 for list_item in feature_map_shape_list]): raise ValueError('feature_map_shape_list must be a list of pairs.') grid_height, grid_width = feature_map_shape_list[0] scales_grid, aspect_ratios_grid = ops.meshgrid(self._scales, self._aspect_ratios) scales_grid = tf.reshape(scales_grid, [-1]) aspect_ratios_grid = tf.reshape(aspect_ratios_grid, [-1]) anchors = tile_anchors(grid_height, grid_width, scales_grid, aspect_ratios_grid, self._base_anchor_size, self._anchor_stride, self._anchor_offset) num_anchors = anchors.num_boxes_static() if num_anchors is None: num_anchors = anchors.num_boxes() anchor_indices = tf.zeros([num_anchors]) anchors.add_field('feature_map_index', anchor_indices) return [anchors] def tile_anchors(grid_height, grid_width, scales, aspect_ratios, base_anchor_size, anchor_stride, anchor_offset): """Create a tiled set of anchors strided along a grid in image space. This op creates a set of anchor boxes by placing a "basis" collection of boxes with user-specified scales and aspect ratios centered at evenly distributed points along a grid. The basis collection is specified via the scale and aspect_ratios arguments. For example, setting scales=[.1, .2, .2] and aspect ratios = [2,2,1/2] means that we create three boxes: one with scale .1, aspect ratio 2, one with scale .2, aspect ratio 2, and one with scale .2 and aspect ratio 1/2. Each box is multiplied by "base_anchor_size" before placing it over its respective center. Grid points are specified via grid_height, grid_width parameters as well as the anchor_stride and anchor_offset parameters. Args: grid_height: size of the grid in the y direction (int or int scalar tensor) grid_width: size of the grid in the x direction (int or int scalar tensor) scales: a 1-d (float) tensor representing the scale of each box in the basis set. aspect_ratios: a 1-d (float) tensor representing the aspect ratio of each box in the basis set. The length of the scales and aspect_ratios tensors must be equal. base_anchor_size: base anchor size as [height, width] (float tensor of shape [2]) anchor_stride: difference in centers between base anchors for adjacent grid positions (float tensor of shape [2]) anchor_offset: center of the anchor with scale and aspect ratio 1 for the upper left element of the grid, this should be zero for feature networks with only VALID padding and even receptive field size, but may need some additional calculation if other padding is used (float tensor of shape [2]) Returns: a BoxList holding a collection of N anchor boxes """ ratio_sqrts = tf.sqrt(aspect_ratios) heights = scales / ratio_sqrts * base_anchor_size[0] widths = scales * ratio_sqrts * base_anchor_size[1] # Get a grid of box centers y_centers = tf.to_float(tf.range(grid_height)) y_centers = y_centers * anchor_stride[0] + anchor_offset[0] x_centers = tf.to_float(tf.range(grid_width)) x_centers = x_centers * anchor_stride[1] + anchor_offset[1] x_centers, y_centers = ops.meshgrid(x_centers, y_centers) widths_grid, x_centers_grid = ops.meshgrid(widths, x_centers) heights_grid, y_centers_grid = ops.meshgrid(heights, y_centers) bbox_centers = tf.stack([y_centers_grid, x_centers_grid], axis=3) bbox_sizes = tf.stack([heights_grid, widths_grid], axis=3) bbox_centers = tf.reshape(bbox_centers, [-1, 2]) bbox_sizes = tf.reshape(bbox_sizes, [-1, 2]) bbox_corners = _center_size_bbox_to_corners_bbox(bbox_centers, bbox_sizes) return box_list.BoxList(bbox_corners) def _center_size_bbox_to_corners_bbox(centers, sizes): """Converts bbox center-size representation to corners representation. Args: centers: a tensor with shape [N, 2] representing bounding box centers sizes: a tensor with shape [N, 2] representing bounding boxes Returns: corners: tensor with shape [N, 4] representing bounding boxes in corners representation """ return tf.concat([centers - .5 * sizes, centers + .5 * sizes], 1)
PyTorch/DrugDiscovery/MoFlow/moflow/data
data
encoding
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2020 Chengxi Zang # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from typing import Tuple import numpy as np from rdkit import Chem from moflow.config import BOND_TO_CODE, DUMMY_CODE class MolEncoder: """Encodes atoms and adjecency matrix. Args: out_size (int): It specifies the size of array returned by `get_input_features`. If the number of atoms in the molecule is less than this value, the returned arrays is padded to have fixed size. """ def __init__(self, out_size: int): super(MolEncoder, self).__init__() self.out_size = out_size def encode_mol(self, mol: Chem.Mol) -> Tuple[np.ndarray, np.ndarray]: """get input features Args: mol (Mol): Returns: """ mol = self._standardize_mol(mol) self._check_num_atoms(mol) atom_array = self.construct_atomic_number_array(mol) adj_array = self.construct_discrete_edge_matrix(mol) return atom_array, adj_array def _standardize_mol(self, mol: Chem.Mol) -> Chem.Mol: canonical_smiles = Chem.MolToSmiles(mol, isomericSmiles=False, canonical=True) mol = Chem.MolFromSmiles(canonical_smiles) Chem.Kekulize(mol) return mol def _check_num_atoms(self, mol: Chem.Mol) -> None: """Check number of atoms in `mol` does not exceed `out_size`""" num_atoms = mol.GetNumAtoms() if num_atoms > self.out_size: raise EncodingError(f'Number of atoms in mol {num_atoms} exceeds num_max_atoms {self.out_size}') def construct_atomic_number_array(self, mol: Chem.Mol) -> np.ndarray: """Returns atomic numbers of atoms consisting a molecule. Args: mol (rdkit.Chem.Mol): Input molecule. Returns: numpy.ndarray: an array consisting of atomic numbers of atoms in the molecule. """ atom_list = [a.GetAtomicNum() for a in mol.GetAtoms()] n_atom = len(atom_list) if self.out_size < n_atom: raise EncodingError(f'out_size {self.out_size} is smaller than number of atoms in mol {n_atom}') atom_array = np.full(self.out_size, DUMMY_CODE, dtype=np.uint8) atom_array[:n_atom] = atom_list return atom_array def construct_discrete_edge_matrix(self, mol: Chem.Mol) -> np.ndarray: """Returns the edge-type dependent adjacency matrix of the given molecule. Args: mol (rdkit.Chem.Mol): Input molecule. Returns: adj_array (numpy.ndarray): The adjacent matrix of the input molecule. It is symmetrical 2-dimensional array with shape (out_size, out_size), filled with integers representing bond types. It two atoms are not conncted, DUMMY_CODE is used instead. """ if mol is None: raise EncodingError('mol is None') n_atom = mol.GetNumAtoms() if self.out_size < n_atom: raise EncodingError(f'out_size {self.out_size} is smaller than number of atoms in mol {n_atom}') adjs = np.full((self.out_size, self.out_size), DUMMY_CODE, dtype=np.uint8) for bond in mol.GetBonds(): bond_type = bond.GetBondType() # we need to use code here - bond types are rdkit objects code = BOND_TO_CODE[bond_type] i = bond.GetBeginAtomIdx() j = bond.GetEndAtomIdx() adjs[[i, j], [j, i]] = code return adjs class EncodingError(Exception): pass
PyTorch/LanguageModeling/BART/bart/tokenization
tokenization
tokenization_bart
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import List, Optional from bart.tokenization.tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast from bart.tokenization.tokenization_utils_base import BatchEncoding logger = logging.getLogger(__name__) # vocab and merges same as roberta vocab_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-vocab.json" merges_url = "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-merges.txt" _all_bart_models = [ "facebook/bart-base", "facebook/bart-large", "facebook/bart-large-mnli", "facebook/bart-large-cnn", "facebook/bart-large-xsum", "yjernite/bart_eli5", ] class BartTokenizer(RobertaTokenizer): # merges and vocab same as Roberta max_model_input_sizes = {m: 1024 for m in _all_bart_models} pretrained_vocab_files_map = { "vocab_file": {m: vocab_url for m in _all_bart_models}, "merges_file": {m: merges_url for m in _all_bart_models}, } def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = "None", truncation=True, **kwargs, ) -> BatchEncoding: r""" Prepare a batch that can be passed directly to an instance of :class:`~transformers.BartModel`. Args: src_texts: (:obj:`List[str]`): List of documents to summarize or source language texts. tgt_texts: (:obj:`List[str]`, `optional`): List of summaries or target language texts. max_length (:obj:`int`, `optional`): Controls the maximum length for encoder inputs (documents to summarize or source language texts). If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (:obj:`int`, `optional`): Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set to :obj:`None`, this will use the max_length value. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). **kwargs: Additional keyword arguments passed along to :obj:`self.__call__`. Returns: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **decoder_input_ids** -- List of token ids to be fed to the decoder. - **decoder_attention_mask** -- List of indices specifying which tokens should be attended to by the decoder. This does not include causal mask, which is built by the model. The full set of keys ``[input_ids, attention_mask, decoder_input_ids, decoder_attention_mask]``, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. """ if max_length is None: max_length = self.model_max_length model_inputs: BatchEncoding = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length decoder_inputs: BatchEncoding = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, ) for k, v in decoder_inputs.items(): model_inputs[f"decoder_{k}"] = v return model_inputs class BartTokenizerFast(RobertaTokenizerFast): # merges and vocab same as Roberta max_model_input_sizes = {m: 1024 for m in _all_bart_models} pretrained_vocab_files_map = { "vocab_file": {m: vocab_url for m in _all_bart_models}, "merges_file": {m: merges_url for m in _all_bart_models}, } def prepare_seq2seq_batch( self, src_texts: List[str], tgt_texts: Optional[List[str]] = None, max_length: Optional[int] = None, max_target_length: Optional[int] = None, padding: str = "longest", return_tensors: str = "None", truncation=True, **kwargs, ) -> BatchEncoding: r""" Prepare a batch that can be passed directly to an instance of :class:`~transformers.BartModel`. Args: src_texts: (:obj:`List[str]`): List of documents to summarize or source language texts. tgt_texts: (:obj:`List[str]`, `optional`): List of summaries or target language texts. max_length (:obj:`int`, `optional`): Controls the maximum length for encoder inputs (documents to summarize or source language texts). If left unset or set to :obj:`None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. max_target_length (:obj:`int`, `optional`): Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or set to :obj:`None`, this will use the max_length value. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`False`): Activates and controls padding. Accepts the following values: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). return_tensors (:obj:`str` or :class:`~transformers.tokenization_utils_base.TensorType`, `optional`, defaults to "pt"): If set, will return tensors instead of list of python integers. Acceptable values are: * :obj:`'tf'`: Return TensorFlow :obj:`tf.constant` objects. * :obj:`'pt'`: Return PyTorch :obj:`torch.Tensor` objects. * :obj:`'np'`: Return Numpy :obj:`np.ndarray` objects. truncation (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.TruncationStrategy`, `optional`, defaults to :obj:`True`): Activates and controls truncation. Accepts the following values: * :obj:`True` or :obj:`'longest_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_first'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`'only_second'`: Truncate to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. * :obj:`False` or :obj:`'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). **kwargs: Additional keyword arguments passed along to :obj:`self.__call__`. Returns: :class:`~transformers.BatchEncoding`: A :class:`~transformers.BatchEncoding` with the following fields: - **input_ids** -- List of token ids to be fed to the encoder. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model. - **decoder_input_ids** -- List of token ids to be fed to the decoder. - **decoder_attention_mask** -- List of indices specifying which tokens should be attended to by the decoder. This does not include causal mask, which is built by the model. The full set of keys ``[input_ids, attention_mask, decoder_input_ids, decoder_attention_mask]``, will only be returned if tgt_texts is passed. Otherwise, input_ids, attention_mask will be the only keys. """ if max_length is None: max_length = self.model_max_length model_inputs: BatchEncoding = self( src_texts, add_special_tokens=True, return_tensors=return_tensors, max_length=max_length, padding=padding, truncation=truncation, **kwargs, ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: max_target_length = max_length decoder_inputs: BatchEncoding = self( tgt_texts, add_special_tokens=True, return_tensors=return_tensors, padding=padding, max_length=max_target_length, truncation=truncation, **kwargs, ) for k, v in decoder_inputs.items(): model_inputs[f"decoder_{k}"] = v return model_inputs
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops
ops
training_ops
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training specific ops, including sampling, building targets, etc.""" from __future__ import absolute_import, division, print_function import tensorflow as tf from mrcnn_tf2.object_detection import balanced_positive_negative_sampler from mrcnn_tf2.ops import spatial_transform_ops from mrcnn_tf2.utils import box_utils _EPSILON = 1e-8 def _add_class_assignments(iou, gt_boxes, gt_labels): """Computes object category assignment for each box. Args: iou: a tensor for the iou matrix with a shape of [batch_size, K, MAX_NUM_INSTANCES]. K is the number of post-nms RoIs (i.e., rpn_post_nms_topn). gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This tensor might have paddings with negative values. The coordinates of gt_boxes are in the pixel coordinates of the scaled image scale. gt_labels: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This tensor might have paddings with a value of -1. Returns: max_boxes: a tensor with a shape of [batch_size, K, 4], representing the ground truth coordinates of each roi. max_classes: a int32 tensor with a shape of [batch_size, K], representing the ground truth class of each roi. max_overlap: a tensor with a shape of [batch_size, K], representing the maximum overlap of each roi. argmax_iou: a tensor with a shape of [batch_size, K], representing the iou argmax. """ with tf.name_scope('add_class_assignments'): batch_size, _, _ = iou.get_shape().as_list() argmax_iou = tf.argmax(input=iou, axis=2, output_type=tf.int32) indices = tf.reshape( argmax_iou + tf.expand_dims(tf.range(batch_size) * tf.shape(input=gt_labels)[1], 1), shape=[-1] ) max_classes = tf.reshape(tf.gather(tf.reshape(gt_labels, [-1, 1]), indices), [batch_size, -1]) max_overlap = tf.reduce_max(input_tensor=iou, axis=2) bg_mask = tf.equal(max_overlap, tf.zeros_like(max_overlap)) max_classes = tf.where(bg_mask, tf.zeros_like(max_classes), max_classes) max_boxes = tf.reshape( tf.gather(tf.reshape(gt_boxes, [-1, 4]), indices), [batch_size, -1, 4] ) max_boxes = tf.where( tf.tile(tf.expand_dims(bg_mask, axis=2), [1, 1, 4]), tf.zeros_like(max_boxes), max_boxes ) return max_boxes, max_classes, max_overlap, argmax_iou def encode_box_targets(boxes, gt_boxes, gt_labels, bbox_reg_weights): """Encodes predicted boxes with respect to ground truth boxes.""" with tf.name_scope('encode_box_targets'): box_targets = box_utils.encode_boxes(boxes=gt_boxes, anchors=boxes, weights=bbox_reg_weights) # If a target is background, the encoded box target should be zeros. mask = tf.tile(tf.expand_dims(tf.equal(gt_labels, tf.zeros_like(gt_labels)), axis=2), [1, 1, 4]) box_targets = tf.where(mask, tf.zeros_like(box_targets), box_targets) return box_targets def proposal_label_op(boxes, gt_boxes, gt_labels, batch_size_per_im=512, fg_fraction=0.25, fg_thresh=0.5, bg_thresh_hi=0.5, bg_thresh_lo=0.): """Assigns the proposals with ground truth labels and performs subsmpling. Given proposal `boxes`, `gt_boxes`, and `gt_labels`, the function uses the following algorithm to generate the final `batch_size_per_im` RoIs. 1. Calculates the IoU between each proposal box and each gt_boxes. 2. Assigns each proposal box with a ground truth class and box label by choosing the largest overlap. 3. Samples `batch_size_per_im` boxes from all proposal boxes, and returns box_targets, class_targets, and RoIs. The reference implementations of #1 and #2 are here: https://github.com/facebookresearch/Detectron/blob/master/detectron/datasets/json_dataset.py The reference implementation of #3 is here: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py Args: boxes: a tensor with a shape of [batch_size, N, 4]. N is the number of proposals before groundtruth assignment (e.g., rpn_post_nms_topn). The last dimension is the pixel coordinates of scaled images in [ymin, xmin, ymax, xmax] form. gt_boxes: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This tensor might have paddings with a value of -1. The coordinates of gt_boxes are in the pixel coordinates of the scaled image. gt_labels: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES]. This tensor might have paddings with a value of -1. batch_size_per_im: a integer represents RoI minibatch size per image. fg_fraction: a float represents the target fraction of RoI minibatch that is labeled foreground (i.e., class > 0). fg_thresh: a float represents the overlap threshold for an RoI to be considered foreground (if >= fg_thresh). bg_thresh_hi: a float represents the overlap threshold for an RoI to be considered background (class = 0 if overlap in [LO, HI)). bg_thresh_lo: a float represents the overlap threshold for an RoI to be considered background (class = 0 if overlap in [LO, HI)). Returns: box_targets: a tensor with a shape of [batch_size, K, 4]. The tensor contains the ground truth pixel coordinates of the scaled images for each roi. K is the number of sample RoIs (e.g., batch_size_per_im). class_targets: a integer tensor with a shape of [batch_size, K]. The tensor contains the ground truth class for each roi. rois: a tensor with a shape of [batch_size, K, 4], representing the coordinates of the selected RoI. proposal_to_label_map: a tensor with a shape of [batch_size, K]. This tensor keeps the mapping between proposal to labels. proposal_to_label_map[i] means the index of the ground truth instance for the i-th proposal. """ with tf.name_scope('proposal_label'): batch_size = boxes.shape[0] # fixed problems when running with Keras AMP gt_boxes = tf.cast(gt_boxes, dtype=tf.float32) # The reference implementation intentionally includes ground truth boxes in # the proposals. # see: # https://github.com/facebookresearch/Detectron/blob/master/detectron/datasets/json_dataset.py#L359 boxes = tf.concat([boxes, gt_boxes], axis=1) iou = box_utils.bbox_overlap(boxes, gt_boxes) (pre_sample_box_targets, pre_sample_class_targets, max_overlap, proposal_to_label_map) = _add_class_assignments(iou, gt_boxes, gt_labels) # Generates a random sample of RoIs comprising foreground and background # examples. # reference: # https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py#L132 positives = tf.greater(max_overlap, fg_thresh * tf.ones_like(max_overlap)) negatives = tf.logical_and( tf.greater_equal(max_overlap, bg_thresh_lo * tf.ones_like(max_overlap)), tf.less(max_overlap, bg_thresh_hi * tf.ones_like(max_overlap)) ) pre_sample_class_targets = tf.where( negatives, tf.zeros_like(pre_sample_class_targets), pre_sample_class_targets ) proposal_to_label_map = tf.where( negatives, tf.zeros_like(proposal_to_label_map), proposal_to_label_map ) # Handles ground truth paddings. ignore_mask = tf.less(tf.reduce_min(input_tensor=iou, axis=2), tf.zeros_like(max_overlap)) # indicator includes both positive and negative labels. # labels includes only positives labels. # positives = indicator & labels. # negatives = indicator & !labels. # ignore = !indicator. labels = positives pos_or_neg = tf.logical_or(positives, negatives) indicator = tf.logical_and(pos_or_neg, tf.logical_not(ignore_mask)) all_samples = [] sampler = balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( positive_fraction=fg_fraction, is_static=True ) # Batch-unroll the sub-sampling process. for i in range(batch_size): samples = sampler.subsample(indicator[i], batch_size_per_im, labels[i]) all_samples.append(samples) all_samples = tf.stack([all_samples], axis=0)[0] # A workaround to get the indices from the boolean tensors. _, samples_indices = tf.nn.top_k( tf.cast(all_samples, dtype=tf.int32), k=batch_size_per_im, sorted=True ) # Contructs indices for gather. samples_indices = tf.reshape( samples_indices + tf.expand_dims(tf.range(batch_size) * tf.shape(input=boxes)[1], 1), [-1] ) rois = tf.reshape( tf.gather(tf.reshape(boxes, [-1, 4]), samples_indices), [batch_size, -1, 4] ) class_targets = tf.reshape( tf.gather(tf.reshape(pre_sample_class_targets, [-1, 1]), samples_indices), [batch_size, -1] ) sample_box_targets = tf.reshape( tf.gather(tf.reshape(pre_sample_box_targets, [-1, 4]), samples_indices), [batch_size, -1, 4] ) sample_proposal_to_label_map = tf.reshape( tf.gather(tf.reshape(proposal_to_label_map, [-1, 1]), samples_indices), [batch_size, -1] ) return sample_box_targets, class_targets, rois, sample_proposal_to_label_map def select_fg_for_masks(class_targets, box_targets, boxes, proposal_to_label_map, max_num_fg=128): """Selects the fore ground objects for mask branch during training. Args: class_targets: a tensor of shape [batch_size, num_boxes] representing the class label for each box. box_targets: a tensor with a shape of [batch_size, num_boxes, 4]. The tensor contains the ground truth pixel coordinates of the scaled images for each roi. boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row represents a box with [y1, x1, y2, x2] in un-normalized coordinates. proposal_to_label_map: a tensor with a shape of [batch_size, num_boxes]. This tensor keeps the mapping between proposal to labels. proposal_to_label_map[i] means the index of the ground truth instance for the i-th proposal. max_num_fg: a integer represents the number of masks per image. Returns: class_targets, boxes, proposal_to_label_map, box_targets that have foreground objects. """ # Masks are for positive (fg) objects only. # Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/mask_rcnn.py batch_size = boxes.shape[0] _, fg_indices = tf.nn.top_k(tf.cast(tf.greater(class_targets, 0), dtype=tf.float32), k=max_num_fg) # Contructs indices for gather. indices = tf.reshape( fg_indices + tf.expand_dims(tf.range(batch_size) * tf.shape(input=class_targets)[1], 1), shape=[-1] ) fg_class_targets = tf.reshape( tf.gather(tf.reshape(class_targets, [-1, 1]), indices), [batch_size, -1] ) fg_box_targets = tf.reshape( tf.gather(tf.reshape(box_targets, [-1, 4]), indices), [batch_size, -1, 4] ) fg_box_rois = tf.reshape( tf.gather(tf.reshape(boxes, [-1, 4]), indices), [batch_size, -1, 4] ) fg_proposal_to_label_map = tf.reshape( tf.gather(tf.reshape(proposal_to_label_map, [-1, 1]), indices), [batch_size, -1] ) return (fg_class_targets, fg_box_targets, fg_box_rois, fg_proposal_to_label_map) def get_mask_targets(fg_boxes, fg_proposal_to_label_map, fg_box_targets, mask_gt_labels, output_size=28): """Crop and resize on multilevel feature pyramid. Args: fg_boxes: A 3-D tensor of shape [batch_size, num_masks, 4]. Each row represents a box with [y1, x1, y2, x2] in un-normalized coordinates. fg_proposal_to_label_map: A tensor of shape [batch_size, num_masks]. fg_box_targets: a float tensor representing the box label for each box with a shape of [batch_size, num_masks, 4]. mask_gt_labels: A tensor with a shape of [batch_size, M, H+4, W+4]. M is NUM_MAX_INSTANCES (i.e., 100 in this implementation) in each image, while H and W are ground truth mask size. The `+4` comes from padding of two zeros in both directions of height and width dimension. output_size: A scalar to indicate the output crop size. Returns: A 4-D tensor representing feature crop of shape [batch_size, num_boxes, output_size, output_size]. """ _, _, max_feature_height, max_feature_width = mask_gt_labels.get_shape().as_list() # proposal_to_label_map might have a -1 paddings. levels = tf.maximum(fg_proposal_to_label_map, 0) # Projects box location and sizes to corresponding cropped ground truth # mask coordinates. bb_y_min, bb_x_min, bb_y_max, bb_x_max = tf.split(value=fg_boxes, num_or_size_splits=4, axis=2) gt_y_min, gt_x_min, gt_y_max, gt_x_max = tf.split(value=fg_box_targets, num_or_size_splits=4, axis=2) valid_feature_width = max_feature_width - 4 valid_feature_height = max_feature_height - 4 y_transform = (bb_y_min - gt_y_min) * valid_feature_height / (gt_y_max - gt_y_min + _EPSILON) + 2 x_transform = (bb_x_min - gt_x_min) * valid_feature_width / (gt_x_max - gt_x_min + _EPSILON) + 2 h_transform = (bb_y_max - bb_y_min) * valid_feature_height / (gt_y_max - gt_y_min + _EPSILON) w_transform = (bb_x_max - bb_x_min) * valid_feature_width / (gt_x_max - gt_x_min + _EPSILON) boundaries = tf.concat( [ tf.cast(tf.ones_like(y_transform) * (max_feature_height - 1), dtype=tf.float32), tf.cast(tf.ones_like(x_transform) * (max_feature_width - 1), dtype=tf.float32) ], axis=-1 ) features_per_box = spatial_transform_ops.selective_crop_and_resize( tf.expand_dims(mask_gt_labels, -1), tf.concat([y_transform, x_transform, h_transform, w_transform], -1), tf.expand_dims(levels, -1), boundaries, output_size ) features_per_box = tf.squeeze(features_per_box, axis=-1) # Masks are binary outputs. features_per_box = tf.where( tf.greater_equal(features_per_box, 0.5), tf.ones_like(features_per_box), tf.zeros_like(features_per_box) ) # mask_targets depend on box RoIs, which have gradients. This stop_gradient # prevents the flow of gradient to box RoIs. features_per_box = tf.stop_gradient(features_per_box) return features_per_box
PyTorch/Detection/Efficientdet/scripts/D0
D0
train_TF32_8xA100-80G
#!/bin/bash function get_dataloader_workers { gpus=$(nvidia-smi -i 0 --query-gpu=count --format=csv,noheader) core=$(nproc --all) workers=$((core/gpus-2)) workers=$((workers>16?16:workers)) echo ${workers} } WORKERS=$(get_dataloader_workers) ./distributed_train.sh 8 /workspace/object_detection/datasets/coco --model efficientdet_d0 -b 80 --lr 0.9 --opt fusedmomentum --warmup-epochs 50 --lr-noise 0.4 0.9 --output /model --worker ${WORKERS} --fill-color mean --model-ema --model-ema-decay 0.999 --eval-after 200 --epochs 300 --resume --smoothing 0.0 --pretrained-backbone-path /backbone_checkpoints/jocbackbone_statedict_B0.pth --memory-format nchw --sync-bn --fused-focal-loss --seed 12711
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
ssd_mobilenet_v1_pets
# SSD with Mobilenet v1, configured for Oxford-IIIT Pets Dataset. # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. model { ssd { num_classes: 37 box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { num_layers: 6 min_scale: 0.2 max_scale: 0.95 aspect_ratios: 1.0 aspect_ratios: 2.0 aspect_ratios: 0.5 aspect_ratios: 3.0 aspect_ratios: 0.3333 } } image_resizer { fixed_shape_resizer { height: 300 width: 300 } } box_predictor { convolutional_box_predictor { min_depth: 0 max_depth: 0 num_layers_before_predictor: 0 use_dropout: false dropout_keep_probability: 0.8 kernel_size: 1 box_code_size: 4 apply_sigmoid_to_scores: false conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.9997, epsilon: 0.001, } } } } feature_extractor { type: 'ssd_mobilenet_v1' min_depth: 16 depth_multiplier: 1.0 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.9997, epsilon: 0.001, } } } loss { classification_loss { weighted_sigmoid { } } localization_loss { weighted_smooth_l1 { } } hard_example_miner { num_hard_examples: 3000 iou_threshold: 0.99 loss_type: CLASSIFICATION max_negatives_per_positive: 3 min_negatives_per_image: 0 } classification_weight: 1.0 localization_weight: 1.0 } normalize_loss_by_num_matches: true post_processing { batch_non_max_suppression { score_threshold: 1e-8 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 100 } score_converter: SIGMOID } } } train_config: { batch_size: 24 optimizer { rms_prop_optimizer: { learning_rate: { exponential_decay_learning_rate { initial_learning_rate: 0.004 decay_steps: 800720 decay_factor: 0.95 } } momentum_optimizer_value: 0.9 decay: 0.9 epsilon: 1.0 } } fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" from_detection_checkpoint: true load_all_detection_checkpoint_vars: true # Note: The below line limits the training process to 200K steps, which we # empirically found to be sufficient enough to train the pets dataset. This # effectively bypasses the learning rate schedule (the learning rate will # never decay). Remove the below line to train indefinitely. num_steps: 200000 data_augmentation_options { random_horizontal_flip { } } data_augmentation_options { ssd_random_crop { } } } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/pet_faces_train.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt" } eval_config: { metrics_set: "coco_detection_metrics" num_examples: 1100 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/pet_faces_val.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt" shuffle: false num_readers: 1 }
TensorFlow/Segmentation/UNet_Medical
UNet_Medical
download_dataset
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os PARSER = argparse.ArgumentParser(description="U-Net medical") PARSER.add_argument('--data_dir', type=str, default='./data', help="""Directory where to download the dataset""") def main(): FLAGS = PARSER.parse_args() if not os.path.exists(FLAGS.data_dir): os.makedirs(FLAGS.data_dir) os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/train-volume.tif -P {}'.format(FLAGS.data_dir)) os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/train-labels.tif -P {}'.format(FLAGS.data_dir)) os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/test-volume.tif -P {}'.format(FLAGS.data_dir)) print("Finished downloading files for U-Net medical to {}".format(FLAGS.data_dir)) if __name__ == '__main__': main()
PyTorch/Segmentation/MaskRCNN/pytorch/configs/pascal_voc
pascal_voc
e2e_faster_rcnn_R_50_C4_1x_4_gpu_voc
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" RPN: PRE_NMS_TOP_N_TEST: 6000 POST_NMS_TOP_N_TEST: 300 ANCHOR_SIZES: (128, 256, 512) ROI_BOX_HEAD: NUM_CLASSES: 21 DATASETS: TRAIN: ("voc_2007_train", "voc_2007_val") TEST: ("voc_2007_test",) SOLVER: BASE_LR: 0.004 WEIGHT_DECAY: 0.0001 STEPS: (12500, ) MAX_ITER: 17500 IMS_PER_BATCH: 4 TEST: IMS_PER_BATCH: 4
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
np_box_list_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.utils.np_box_list_test.""" import numpy as np import tensorflow as tf from object_detection.utils import np_box_list class BoxListTest(tf.test.TestCase): def test_invalid_box_data(self): with self.assertRaises(ValueError): np_box_list.BoxList([0, 0, 1, 1]) with self.assertRaises(ValueError): np_box_list.BoxList(np.array([[0, 0, 1, 1]], dtype=int)) with self.assertRaises(ValueError): np_box_list.BoxList(np.array([0, 1, 1, 3, 4], dtype=float)) with self.assertRaises(ValueError): np_box_list.BoxList(np.array([[0, 1, 1, 3], [3, 1, 1, 5]], dtype=float)) def test_has_field_with_existed_field(self): boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float) boxlist = np_box_list.BoxList(boxes) self.assertTrue(boxlist.has_field('boxes')) def test_has_field_with_nonexisted_field(self): boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float) boxlist = np_box_list.BoxList(boxes) self.assertFalse(boxlist.has_field('scores')) def test_get_field_with_existed_field(self): boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float) boxlist = np_box_list.BoxList(boxes) self.assertTrue(np.allclose(boxlist.get_field('boxes'), boxes)) def test_get_field_with_nonexited_field(self): boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float) boxlist = np_box_list.BoxList(boxes) with self.assertRaises(ValueError): boxlist.get_field('scores') class AddExtraFieldTest(tf.test.TestCase): def setUp(self): boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0], [0.0, 0.0, 20.0, 20.0]], dtype=float) self.boxlist = np_box_list.BoxList(boxes) def test_add_already_existed_field(self): with self.assertRaises(ValueError): self.boxlist.add_field('boxes', np.array([[0, 0, 0, 1, 0]], dtype=float)) def test_add_invalid_field_data(self): with self.assertRaises(ValueError): self.boxlist.add_field('scores', np.array([0.5, 0.7], dtype=float)) with self.assertRaises(ValueError): self.boxlist.add_field('scores', np.array([0.5, 0.7, 0.9, 0.1], dtype=float)) def test_add_single_dimensional_field_data(self): boxlist = self.boxlist scores = np.array([0.5, 0.7, 0.9], dtype=float) boxlist.add_field('scores', scores) self.assertTrue(np.allclose(scores, self.boxlist.get_field('scores'))) def test_add_multi_dimensional_field_data(self): boxlist = self.boxlist labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int) boxlist.add_field('labels', labels) self.assertTrue(np.allclose(labels, self.boxlist.get_field('labels'))) def test_get_extra_fields(self): boxlist = self.boxlist self.assertItemsEqual(boxlist.get_extra_fields(), []) scores = np.array([0.5, 0.7, 0.9], dtype=float) boxlist.add_field('scores', scores) self.assertItemsEqual(boxlist.get_extra_fields(), ['scores']) labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]], dtype=int) boxlist.add_field('labels', labels) self.assertItemsEqual(boxlist.get_extra_fields(), ['scores', 'labels']) def test_get_coordinates(self): y_min, x_min, y_max, x_max = self.boxlist.get_coordinates() expected_y_min = np.array([3.0, 14.0, 0.0], dtype=float) expected_x_min = np.array([4.0, 14.0, 0.0], dtype=float) expected_y_max = np.array([6.0, 15.0, 20.0], dtype=float) expected_x_max = np.array([8.0, 15.0, 20.0], dtype=float) self.assertTrue(np.allclose(y_min, expected_y_min)) self.assertTrue(np.allclose(x_min, expected_x_min)) self.assertTrue(np.allclose(y_max, expected_y_max)) self.assertTrue(np.allclose(x_max, expected_x_max)) def test_num_boxes(self): boxes = np.array([[0., 0., 100., 100.], [10., 30., 50., 70.]], dtype=float) boxlist = np_box_list.BoxList(boxes) expected_num_boxes = 2 self.assertEquals(boxlist.num_boxes(), expected_num_boxes) if __name__ == '__main__': tf.test.main()
MxNet/Classification/RN50v1.5
RN50v1.5
benchmarking
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from mxnet.io import DataIter import time class BenchmarkingDataIter: def __init__(self, data_iter, benchmark_iters=None): self.data_iter = data_iter self.benchmark_iters = benchmark_iters self.overall_time = 0 self.num = 0 def __iter__(self): iter(self.data_iter) return self def next(self): if self.benchmark_iters is not None and self.num >= self.benchmark_iters: raise StopIteration try: start_time = time.time() ret = self.data_iter.next() end_time = time.time() except StopIteration: if self.benchmark_iters is None: raise self.data_iter.reset() start_time = time.time() ret = self.data_iter.next() end_time = time.time() if self.num != 0: self.overall_time += end_time - start_time self.num += 1 return ret def __next__(self): return self.next() def __getattr__(self, attr): return getattr(self.data_iter, attr) def get_avg_time(self): if self.num <= 1: avg = float('nan') else: avg = self.overall_time / (self.num - 1) return avg def reset(self): self.overall_time = 0 self.num = 0 self.data_iter.reset()
TensorFlow/Segmentation/UNet_Industrial/datasets
datasets
__init__
#!/usr/bin/env python # -*- coding: utf-8 -*- # ============================================================================== # # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================== from datasets import core from datasets.dagm2007 import DAGM2007_Dataset known_datasets = {cls.dataset_name: cls for cls in core.BaseDataset.__subclasses__()} __all__ = ['known_datasets']
PyTorch/SpeechSynthesis/Tacotron2/phrases
phrases
phrase
The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves.
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/hydra/job_logging
job_logging
primary
# @package _group_ # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. version: 1 formatters: simple: format: '%(message)s' handlers: console: class: logging.StreamHandler formatter: simple stream: ext://sys.stdout root: handlers: [console] level: INFO disable_existing_loggers: false