PyTorch/Classification/ConvNets/triton
triton
requirements
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. networkx==2.5 onnx==1.8.0 onnxruntime==1.5.2 pycuda>=2019.1.2 PyYAML>=5.2 tqdm>=4.44.1 tabulate>=0.8.7 natsort>=7.0.0 # use tags instead of branch names - because there might be docker cache hit causing not fetching most recent changes on branch model_navigator @ git+https://github.com/triton-inference-server/model_navigator.git@v0.1.0#egg=model_navigator
PyTorch/SpeechSynthesis/Tacotron2
Tacotron2
run_latency_tests
unset CUDA_VISIBLE_DEVICES bash test_infer.sh -bs 1 -il 128 --fp16 --num-iters 1003 --tacotron2 ./checkpoints/tacotron2_1032590_6000_amp --waveglow ./checkpoints/waveglow_1076430_14000_amp --wn-channels 256 bash test_infer.sh -bs 4 -il 128 --fp16 --num-iters 1003 --tacotron2 ./checkpoints/tacotron2_1032590_6000_amp --waveglow ./checkpoints/waveglow_1076430_14000_amp --wn-channels 256 bash test_infer.sh -bs 1 -il 128 --num-iters 1003 --tacotron2 ./checkpoints/tacotron2_1032590_6000_amp --waveglow ./checkpoints/waveglow_1076430_14000_amp --wn-channels 256 bash test_infer.sh -bs 4 -il 128 --num-iters 1003 --tacotron2 ./checkpoints/tacotron2_1032590_6000_amp --waveglow ./checkpoints/waveglow_1076430_14000_amp --wn-channels 256 export CUDA_VISIBLE_DEVICES= export OMP_NUM_THREADS=6 export KMP_BLOCKTIME=0 export KMP_AFFINITY=granularity=fine,compact,1,0 bash test_infer.sh -bs 1 -il 128 --cpu --num-iters 1003 --tacotron2 ./checkpoints/tacotron2_1032590_6000_amp --waveglow ./checkpoints/waveglow_1076430_14000_amp --wn-channels 256 bash test_infer.sh -bs 4 -il 128 --cpu --num-iters 1003 --tacotron2 ./checkpoints/tacotron2_1032590_6000_amp --waveglow ./checkpoints/waveglow_1076430_14000_amp --wn-channels 256
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2PrenetPlugin
taco2PrenetPlugin
taco2PrenetLayerPluginCreator
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_PRENETLAYERPLUGINCREATOR_H #define TT2I_PRENETLAYERPLUGINCREATOR_H #include "NvInfer.h" #include <string> #ifdef DEVEL // The destructor of nvinfer1::IPluginCreator is non-virtual and public, so // we need to supress the warning. #pragma GCC diagnostic ignored "-Wnon-virtual-dtor" #endif namespace nvinfer1 { namespace plugin { class Taco2PrenetLayerPluginCreator : public nvinfer1::IPluginCreator { public: /** * @brief Get the collection of fields for this plugin, with their names only. * * @return The collection of fields. */ static nvinfer1::PluginFieldCollection* getFields(); /** * @brief Create a new Taco2PrenetLayerPluginCreator. */ Taco2PrenetLayerPluginCreator(); /** * @brief Get the name of the plugin. * * @return The name of the plugin. */ const char* getPluginName() const override; /** * @brief Get the plugin version. * * @return The plugin version. */ const char* getPluginVersion() const override; /** * @brief Get the collection of fields for this plugin. * * @return The collection of fields. */ const nvinfer1::PluginFieldCollection* getFieldNames() override; /** * @brief Create a new Taco2PrenetLayerPlugin. * * @param name The name (unused currently). * @param fc The collection of fields to initialize with. * * @return The created plugin. */ nvinfer1::IPluginV2* createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) override; /** * @brief Create a custom layer by name from a data stream. * * @param layerName The name of the layer. * @param serialData The serialized data for the layer. * @param serialLength The length of the serialized data. * * @return The plugin. Clients must destroy the plugin once all consumers of * it have been destroyed. */ nvinfer1::IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override; /** * @brief Set the namespace for created plugins. * * @param pluginNamespace The namespace. 
*/ void setPluginNamespace(const char* pluginNamespace) override; /** * @brief Get the namespace for created plugins. * * @return The namespace. */ const char* getPluginNamespace() const override; private: std::string mNamespace; }; } // namespace plugin } // namespace nvinfer1 #ifdef DEVEL #pragma GCC diagnostic pop #endif #endif
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/training/TF32
TF32
train_benchmark_8xA100-80G
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \ --cfg config/efficientnet_v2/s_cfg.py \ --mode train_and_eval \ --use_xla \ --model_dir ./output/ \ --data_dir /data/ \ --log_steps 500 \ --save_checkpoint_freq 10 \ --n_stages 1 \ --max_epochs 3 \ --steps_per_epoch 2000 \ --train_batch_size 230 \ --train_img_size 300 \ --lr_decay cosine \ --lr_init 0.005 \ --weight_decay .000005 \ --opt_epsilon 0.001 \ --moving_average_decay 0.9999 \ --eval_img_size 384 \ --eval_batch_size 128 \ --augmenter_name randaugment \ --raug_num_layers 2 \ --raug_magnitude 15 \ --cutmix_alpha 0 \ --mixup_alpha 0 \ --defer_img_mixing
TensorFlow/Detection/SSD/models/research/object_detection/test_images
test_images
image_info
Image provenance: image1.jpg: https://commons.wikimedia.org/wiki/File:Baegle_dwa.jpg image2.jpg: Michael Miley, https://www.flickr.com/photos/mike_miley/4678754542/in/photolist-88rQHL-88oBVp-88oC2B-88rS6J-88rSqm-88oBLv-88oBC4
TensorFlow/Detection/SSD/models/research/object_detection/metrics
metrics
offline_eval_map_corloc
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== r"""Evaluation executable for detection data. This executable evaluates precomputed detections produced by a detection model and writes the evaluation results into csv file metrics.csv, stored in the directory, specified by --eval_dir. The evaluation metrics set is supplied in object_detection.protos.EvalConfig in metrics_set field. Currently two set of metrics are supported: - pascal_voc_metrics: standard PASCAL VOC 2007 metric - open_images_detection_metrics: Open Image V2 metric All other field of object_detection.protos.EvalConfig are ignored. Example usage: ./compute_metrics \ --eval_dir=path/to/eval_dir \ --eval_config_path=path/to/evaluation/configuration/file \ --input_config_path=path/to/input/configuration/file """ import csv import os import re import tensorflow as tf from object_detection.core import standard_fields from object_detection.legacy import evaluator from object_detection.metrics import tf_example_parser from object_detection.utils import config_util from object_detection.utils import label_map_util flags = tf.app.flags tf.logging.set_verbosity(tf.logging.INFO) flags.DEFINE_string('eval_dir', None, 'Directory to write eval summaries to.') flags.DEFINE_string('eval_config_path', None, 'Path to an eval_pb2.EvalConfig config file.') flags.DEFINE_string('input_config_path', None, 'Path to an eval_pb2.InputConfig config file.') FLAGS = flags.FLAGS def _generate_sharded_filenames(filename): m = re.search(r'@(\d{1,})', filename) if m: num_shards = int(m.group(1)) return [ re.sub(r'@(\d{1,})', '-%.5d-of-%.5d' % (i, num_shards), filename) for i in range(num_shards) ] else: return [filename] def _generate_filenames(filenames): result = [] for filename in filenames: result += _generate_sharded_filenames(filename) return result def read_data_and_evaluate(input_config, eval_config): """Reads pre-computed object detections and groundtruth from tf_record. Args: input_config: input config proto of type object_detection.protos.InputReader. eval_config: evaluation config proto of type object_detection.protos.EvalConfig. Returns: Evaluated detections metrics. Raises: ValueError: if input_reader type is not supported or metric type is unknown. 
""" if input_config.WhichOneof('input_reader') == 'tf_record_input_reader': input_paths = input_config.tf_record_input_reader.input_path categories = label_map_util.create_categories_from_labelmap( input_config.label_map_path) object_detection_evaluators = evaluator.get_evaluators( eval_config, categories) # Support a single evaluator object_detection_evaluator = object_detection_evaluators[0] skipped_images = 0 processed_images = 0 for input_path in _generate_filenames(input_paths): tf.logging.info('Processing file: {0}'.format(input_path)) record_iterator = tf.python_io.tf_record_iterator(path=input_path) data_parser = tf_example_parser.TfExampleDetectionAndGTParser() for string_record in record_iterator: tf.logging.log_every_n(tf.logging.INFO, 'Processed %d images...', 1000, processed_images) processed_images += 1 example = tf.train.Example() example.ParseFromString(string_record) decoded_dict = data_parser.parse(example) if decoded_dict: object_detection_evaluator.add_single_ground_truth_image_info( decoded_dict[standard_fields.DetectionResultFields.key], decoded_dict) object_detection_evaluator.add_single_detected_image_info( decoded_dict[standard_fields.DetectionResultFields.key], decoded_dict) else: skipped_images += 1 tf.logging.info('Skipped images: {0}'.format(skipped_images)) return object_detection_evaluator.evaluate() raise ValueError('Unsupported input_reader_config.') def write_metrics(metrics, output_dir): """Write metrics to the output directory. Args: metrics: A dictionary containing metric names and values. output_dir: Directory to write metrics to. """ tf.logging.info('Writing metrics.') with open(os.path.join(output_dir, 'metrics.csv'), 'w') as csvfile: metrics_writer = csv.writer(csvfile, delimiter=',') for metric_name, metric_value in metrics.items(): metrics_writer.writerow([metric_name, str(metric_value)]) def main(argv): del argv required_flags = ['input_config_path', 'eval_config_path', 'eval_dir'] for flag_name in required_flags: if not getattr(FLAGS, flag_name): raise ValueError('Flag --{} is required'.format(flag_name)) configs = config_util.get_configs_from_multiple_files( eval_input_config_path=FLAGS.input_config_path, eval_config_path=FLAGS.eval_config_path) eval_config = configs['eval_config'] input_config = configs['eval_input_config'] metrics = read_data_and_evaluate(input_config, eval_config) # Save metrics write_metrics(metrics, FLAGS.eval_dir) if __name__ == '__main__': tf.app.run(main)
TensorFlow/Detection/SSD/models/research/object_detection/g3doc
g3doc
using_your_own_dataset
# Preparing Inputs [TOC] To use your own dataset in the Tensorflow Object Detection API, you must convert it into the [TFRecord file format](https://www.tensorflow.org/api_guides/python/python_io#tfrecords_format_details). This document outlines how to write a script to generate the TFRecord file. ## Label Maps Each dataset is required to have a label map associated with it. This label map defines a mapping from string class names to integer class IDs. The label map should be a `StringIntLabelMap` text protobuf. Sample label maps can be found in object_detection/data. Label maps should always start from id 1. ## Dataset Requirements For every example in your dataset, you should have the following information: 1. An RGB image for the dataset encoded as jpeg or png. 2. A list of bounding boxes for the image. Each bounding box should contain: 1. Bounding box coordinates (with the origin in the top-left corner), defined by 4 floating-point numbers [ymin, xmin, ymax, xmax]. Note that we store the _normalized_ coordinates (x / width, y / height) in the TFRecord dataset. 2. The class of the object in the bounding box. # Example Image Consider the following image: ![Example Image](img/example_cat.jpg "Example Image") with the following label map: ``` item { id: 1 name: 'Cat' } item { id: 2 name: 'Dog' } ``` We can generate a tf.Example proto for this image using the following code: ```python def create_cat_tf_example(encoded_cat_image_data): """Creates a tf.Example proto from sample cat image. Args: encoded_cat_image_data: The jpg encoded data of the cat image. Returns: example: The created tf.Example. """ height = 1032 width = 1200 filename = b'example_cat.jpg' image_format = b'jpg' xmins = [322.0 / 1200.0] xmaxs = [1062.0 / 1200.0] ymins = [174.0 / 1032.0] ymaxs = [761.0 / 1032.0] classes_text = [b'Cat'] classes = [1] tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(filename), 'image/source_id': dataset_util.bytes_feature(filename), 'image/encoded': dataset_util.bytes_feature(encoded_cat_image_data), 'image/format': dataset_util.bytes_feature(image_format), 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), 'image/object/class/label': dataset_util.int64_list_feature(classes), })) return tf_example ``` ## Conversion Script Outline {#conversion-script-outline} A typical conversion script will look like the following: ```python import tensorflow as tf from object_detection.utils import dataset_util flags = tf.app.flags flags.DEFINE_string('output_path', '', 'Path to output TFRecord') FLAGS = flags.FLAGS def create_tf_example(example): # TODO(user): Populate the following variables from your example. height = None # Image height width = None # Image width filename = None # Filename of the image.
Empty if image is not from file encoded_image_data = None # Encoded image bytes image_format = None # b'jpeg' or b'png' xmins = [] # List of normalized left x coordinates in bounding box (1 per box) xmaxs = [] # List of normalized right x coordinates in bounding box # (1 per box) ymins = [] # List of normalized top y coordinates in bounding box (1 per box) ymaxs = [] # List of normalized bottom y coordinates in bounding box # (1 per box) classes_text = [] # List of string class name of bounding box (1 per box) classes = [] # List of integer class id of bounding box (1 per box) tf_example = tf.train.Example(features=tf.train.Features(feature={ 'image/height': dataset_util.int64_feature(height), 'image/width': dataset_util.int64_feature(width), 'image/filename': dataset_util.bytes_feature(filename), 'image/source_id': dataset_util.bytes_feature(filename), 'image/encoded': dataset_util.bytes_feature(encoded_image_data), 'image/format': dataset_util.bytes_feature(image_format), 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins), 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs), 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins), 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs), 'image/object/class/text': dataset_util.bytes_list_feature(classes_text), 'image/object/class/label': dataset_util.int64_list_feature(classes), })) return tf_example def main(_): writer = tf.python_io.TFRecordWriter(FLAGS.output_path) # TODO(user): Write code to read in your dataset to examples variable for example in examples: tf_example = create_tf_example(example) writer.write(tf_example.SerializeToString()) writer.close() if __name__ == '__main__': tf.app.run() ``` Note: You may notice additional fields in some other datasets. They are currently unused by the API and are optional. Note: Please refer to the section on [Running an Instance Segmentation Model](instance_segmentation.md) for instructions on how to configure a model that predicts masks in addition to object bounding boxes. ## Sharding datasets When you have more than a few thousand examples, it is beneficial to shard your dataset into multiple files: * The tf.data.Dataset API can read input examples in parallel, improving throughput. * The tf.data.Dataset API can shuffle the examples better with sharded files, which slightly improves model performance. Instead of writing all tf.Example protos to a single file as shown in the [conversion script outline](#conversion-script-outline), use the snippet below. ```python import contextlib2 from object_detection.dataset_tools import tf_record_creation_util num_shards=10 output_filebase='/path/to/train_dataset.record' with contextlib2.ExitStack() as tf_record_close_stack: output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords( tf_record_close_stack, output_filebase, num_shards) for index, example in enumerate(examples): tf_example = create_tf_example(example) output_shard_index = index % num_shards output_tfrecords[output_shard_index].write(tf_example.SerializeToString()) ``` This will produce the following output files: ```bash /path/to/train_dataset.record-00000-of-00010 /path/to/train_dataset.record-00001-of-00010 ... /path/to/train_dataset.record-00009-of-00010 ``` which can then be used in the config file as below. ```bash tf_record_input_reader { input_path: "/path/to/train_dataset.record-?????-of-00010" } ```
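To sanity-check shards produced this way, the records can be read back with the `tf.data` API. The sketch below is illustrative only and is not part of the original guide; it assumes TensorFlow 1.x and reuses the feature keys and the hypothetical shard pattern from the examples above.

```python
# Minimal sketch for reading back sharded TFRecords written with the keys
# used above; assumes TensorFlow 1.x and a hypothetical shard pattern.
import tensorflow as tf

filenames = tf.data.Dataset.list_files(
    '/path/to/train_dataset.record-?????-of-00010')
dataset = filenames.interleave(tf.data.TFRecordDataset, cycle_length=4)

feature_spec = {
    'image/encoded': tf.FixedLenFeature([], tf.string),
    'image/height': tf.FixedLenFeature([], tf.int64),
    'image/width': tf.FixedLenFeature([], tf.int64),
    'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
    'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
    'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
    'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
    'image/object/class/label': tf.VarLenFeature(tf.int64),
}

def parse_example(serialized):
    # Boxes and labels are variable-length per image, hence VarLenFeature.
    return tf.parse_single_example(serialized, feature_spec)

parsed = dataset.map(parse_example).batch(8)
```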
Kaldi/SpeechRecognition/kaldi-asr-client
kaldi-asr-client
CMakeLists
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. cmake_minimum_required(VERSION 3.17..3.20) # # gRPC client for custom Kaldi backend # add_executable(kaldi-asr-parallel-client) add_executable(TritonKaldiGrpcClient::kaldi-asr-parallel-client ALIAS kaldi-asr-parallel-client) target_sources(kaldi-asr-parallel-client PRIVATE kaldi_asr_parallel_client.cc asr_client_imp.cc ) target_include_directories(kaldi-asr-parallel-client SYSTEM PRIVATE /opt/kaldi/src /opt/kaldi/tools/openfst/include ) target_include_directories(kaldi-asr-parallel-client PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ) target_compile_features(kaldi-asr-parallel-client PRIVATE cxx_std_17) target_compile_options(kaldi-asr-parallel-client PRIVATE $<$<OR:$<CXX_COMPILER_ID:Clang>,$<CXX_COMPILER_ID:AppleClang>,$<CXX_COMPILER_ID:GNU>>:-Wall -Wextra -Wno-unused-parameter -Wno-type-limits -Werror> ) target_link_directories(kaldi-asr-parallel-client PRIVATE /opt/kaldi/src/lib ) target_link_libraries(kaldi-asr-parallel-client PRIVATE TritonClient::grpcclient_static -lkaldi-base -lkaldi-util -lkaldi-matrix -lkaldi-feat -lkaldi-lat ) # # Install # include(GNUInstallDirs) install( TARGETS kaldi-asr-parallel-client EXPORT kaldi-asr-parallel-client-targets RUNTIME DESTINATION ${CMAKE_INSTALL_PREFIX}/bin )
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
faster_rcnn_inception_resnet_v2_atrous_cosine_lr_coco
# Faster R-CNN with Inception Resnet v2, Atrous version, with Cosine # Learning Rate schedule. # Trained on COCO, initialized from Imagenet classification checkpoint # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. model { faster_rcnn { num_classes: 90 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_inception_resnet_v2' first_stage_features_stride: 8 } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 8 width_stride: 8 } } first_stage_atrous_rate: 2 first_stage_box_predictor_conv_hyperparams { op: CONV regularizer { l2_regularizer { weight: 0.0 } } initializer { truncated_normal_initializer { stddev: 0.01 } } } first_stage_nms_score_threshold: 0.0 first_stage_nms_iou_threshold: 0.7 first_stage_max_proposals: 300 first_stage_localization_loss_weight: 2.0 first_stage_objectness_loss_weight: 1.0 initial_crop_size: 17 maxpool_kernel_size: 1 maxpool_stride: 1 second_stage_box_predictor { mask_rcnn_box_predictor { use_dropout: false dropout_keep_probability: 1.0 fc_hyperparams { op: FC regularizer { l2_regularizer { weight: 0.0 } } initializer { variance_scaling_initializer { factor: 1.0 uniform: true mode: FAN_AVG } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.0 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 100 } score_converter: SOFTMAX } second_stage_localization_loss_weight: 2.0 second_stage_classification_loss_weight: 1.0 } } train_config: { batch_size: 1 optimizer { momentum_optimizer: { learning_rate: { cosine_decay_learning_rate { learning_rate_base: 0.0006 total_steps: 1200000 warmup_learning_rate: 0.00006 warmup_steps: 20000 } } momentum_optimizer_value: 0.9 } use_moving_average: false } gradient_clipping_by_norm: 10.0 fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" data_augmentation_options { random_horizontal_flip { } } } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" } eval_config: { num_examples: 8000 # Note: The below line limits the evaluation process to 10 evaluations. # Remove the below line to evaluate indefinitely. max_evals: 10 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" shuffle: false num_readers: 1 }
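As the header of this config notes, the `PATH_TO_BE_CONFIGURED` placeholders must be replaced before training. A minimal sketch of doing this programmatically is shown below; the config filename and the destination path are hypothetical, not repository defaults.

```python
# Hypothetical helper for filling in the PATH_TO_BE_CONFIGURED placeholders.
# Assumes the checkpoint, records, and label map all live under /data/coco.
from pathlib import Path

src = Path('faster_rcnn_inception_resnet_v2_atrous_cosine_lr_coco.config')
dst = Path('pipeline.config')
dst.write_text(src.read_text().replace('PATH_TO_BE_CONFIGURED', '/data/coco'))
```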
TensorFlow/Recommendation/WideAndDeep
WideAndDeep
README
# Wide & Deep Recommender Model Training For TensorFlow This repository provides a script and recipe to train the Wide and Deep Recommender model to achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA. The Wide & Deep model for TensorFlow1 is no longer maintained and will soon become unavailable. Please consider the PyTorch or TensorFlow2 models as a substitute for your requirements. ## Table Of Contents - [Model overview](#model-overview) * [Model architecture](#model-architecture) * [Applications and dataset](#applications-and-dataset) * [Default configuration](#default-configuration) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision](#mixed-precision) * [Enabling mixed precision](#enabling-mixed-precision) * [Impact of mixed precision on training accuracy](#impact-of-mixed-precision-on-training-accuracy) * [Impact of mixed precision on inference accuracy](#impact-of-mixed-precision-on-inference-accuracy) * [Enabling TF32](#enabling-tf32) * [Glossary](#glossary) - [Setup](#setup) * [Requirements](#requirements) - [Quick Start Guide](#quick-start-guide) - [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Parameters](#parameters) * [Command-line options](#command-line-options) * [Getting the data](#getting-the-data) * [Dataset guidelines](#dataset-guidelines) * [Spark preprocessing](#spark-preprocessing) * [Training process](#training-process) - [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Training accuracy: NVIDIA DGX A100 (8x A100 40GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-40gb) * [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb) * [Training accuracy plots](#training-accuracy-plots) * [Training stability test](#training-stability-test) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX A100 (8x A100 40GB)](#training-performance-nvidia-dgx-a100-8x-a100-40gb) * [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb) - [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview Recommendation systems drive engagement on many of the most popular online platforms. As the volume of data available to power these systems grows exponentially, data scientists are increasingly turning from more traditional machine learning methods to highly expressive deep learning models to improve the quality of their recommendations. Google's [Wide & Deep Learning for Recommender Systems](https://arxiv.org/abs/1606.07792) has emerged as a popular model for these problems both for its robustness to signal sparsity and for its user-friendly implementation in [TensorFlow](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNLinearCombinedClassifier). The difference between this Wide & Deep Recommender Model and the model from the paper is the size of the Deep part of the model. Originally, in Google's paper, the fully connected part was three layers of 1024, 512, and 256 neurons. Our model consists of 5 layers of 1024 neurons each. The model enables you to train a recommender model that combines the memorization of the Wide part and generalization of the Deep part of the network.
This model is trained with mixed precision using Tensor Cores on NVIDIA Volta, Turing and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 1.49 times faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. ### Model architecture Wide & Deep refers to a class of networks that use the output of two parts working in parallel - wide model and deep model - to make predictions of recommenders. The wide model is a generalized linear model of features together with their transforms. The deep model is a series of 5 hidden MLP layers of 1024 neurons each beginning with a dense embedding of features. The architecture is presented in Figure 1. <p align="center"> <img width="70%" src="https://developer.download.nvidia.com/w-and-d-recommender/model.svg" /> <br> Figure 1. The architecture of the Wide & Deep model.</a> </p> ### Applications and dataset As a reference dataset, we used a subset of [the features engineered](https://github.com/gabrielspmoreira/kaggle_outbrain_click_prediction_google_cloud_ml_engine) by the 19th place finisher in the [Kaggle Outbrain Click Prediction Challenge](https://www.kaggle.com/c/outbrain-click-prediction/). This competition challenged competitors to predict the likelihood with which a particular ad on a website's display would be clicked on. Competitors were given information about the user, display, document, and ad in order to train their models. More information can be found [here](https://www.kaggle.com/c/outbrain-click-prediction/data). ### Default configuration For reference, and to give context to the acceleration numbers described below, some important properties of our features and model are as follows: - Features - Request Level - 16 scalar numeric features `(shape=(1,)`) - 12 one-hot categorical features (all `int` dtype) - 5 indicator embeddings with sizes 2, 2, 3, 3, 6 - 7 trainable embeddings - all except two have an embedding size of 64 (remaining two have 128), though it's important to note for *all* categorical features that we *do not* leverage that information to short-circuit the lookups by treating them as a single multi-hot lookup. Our API is fully general to any combination of embedding sizes. - all use hash bucketing with `num_buckets=` 300k, 100k, 4k, 2.5k, 2k, 1k, and 300 respectively - 3 multi-hot categorical features (all `int` dtype) - all trainable embeddings - all with embedding size 64 - all use hash bucketing with `num_buckets=` 10k, 350, and 100 respectively - Item Level - 16 scalar numeric features - 4 one hot categorical features (all `int` dtype) - embedding sizes of 128, 64, 64, 64 respectively - hash bucketing with `num_buckets=` 250k, 4k, 2.5k, and 1k respectively - 3 multi-hot categorical features (all `int` dtype) - all with embedding size 64 - hash bucketing with `num_buckets=` 10k, 350, and 100 respectively - All features are used in both wide *and* deep branches of the network - Model - Total embedding dimension is 1328 - 5 hidden layers each with size 1024 - Output dimension is 1 (probability of click) ### Feature support matrix The following features are supported by this model: | Feature | Wide & Deep |-----------------------|-------------------------- |Horovod Multi-GPU | Yes |Automatic mixed precision (AMP) | Yes #### Features Horovod Horovod is a distributed training framework for TensorFlow, Keras, PyTorch and MXNet. 
The goal of Horovod is to make distributed deep learning fast and easy to use. For more information about how to get started with Horovod, see the [Horovod: Official repository](https://github.com/horovod/horovod). Multi-GPU training with Horovod Our model uses Horovod to implement efficient multi-GPU training with NCCL. For details, see example sources in this repository or see the [TensorFlow tutorial](https://github.com/horovod/horovod/#usage). ### Mixed precision Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in the Volta and Turing architecture, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in [CUDA 8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep Learning SDK. For information about: - How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. - How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide. #### Enabling mixed precision To enable Wide & Deep training to use mixed precision you don't need to perform input quantization, only an additional flag `--amp` to the training script is needed (see [Quick Start Guide](#quick-start-guide)). ##### Impact of mixed precision on training accuracy The accuracy of training, measured with MAP@12 metric was not impacted by enabling mixed precision. The obtained results were statistically similar (i.e. similar run-to-run variance was observed, with standard deviation of the level of `0.002`). ##### Impact of mixed precision on inference accuracy For our reference model, the average absolute error on the probability of interaction induced by reduced precision inference is `0.0002`, producing a near-perfect fit between predictions produced by full and mixed precision models. Moreover, this error is uncorrelated with the magnitude of the predicted value, which means for most predictions of interest (i.e. greater than `0.01` or `0.1` likelihood of interaction), the relative magnitude of the error is approaching the noise floor of the problem. #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. 
TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ### Glossary Request level features: Features that describe the person or object _to which_ we wish to make recommendations. Item level features: Features that describe those objects which we are considering recommending. ## Setup The following section lists the requirements that you need to meet in order to start training the Wide & Deep model. ### Requirements This repository contains Dockerfile which extends the TensorFlow NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components: - [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) - [20.10-tf1-py3](https://ngc.nvidia.com/catalog/containers/nvidia:tensorflow) NGC container - Supported GPUs: - [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) - [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/) - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: - [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) - [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry) - [Running TensorFlow](https://docs.nvidia.com/deeplearning/frameworks/tensorflow-release-notes/running.html#running) For those unable to use the TensorFlow NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). ## Quick Start Guide To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the Wide & Deep model on the Outbrain dataset. For the specifics concerning training and inference, see the [Advanced](#advanced) section. 1. Clone the repository. ``` git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/TensorFlow/Recommendation/WideAndDeep ``` 2. Download the Outbrain dataset. The Outbrain dataset can be downloaded from [Kaggle](https://www.kaggle.com/c/outbrain-click-prediction/data) (requires Kaggle account). Unzip the downloaded archive e.g. to `/raid/outbrain/orig` and set the `HOST_OUTBRAIN_PATH` variable to the parent directory: ```bash HOST_OUTBRAIN_PATH=/raid/outbrain ``` 3. Build the Wide & Deep Tensorflow NGC container. ```bash docker build . -t wide_deep ``` 4. Start an interactive session in the NGC container to run preprocessing/training/inference. ```bash docker run --runtime=nvidia --privileged --rm -ti -v ${HOST_OUTBRAIN_PATH}:/outbrain wide_deep /bin/bash ``` 5. Start preprocessing. 
```bash bash scripts/preproc.sh 4096 ``` The result of the preprocessing scripts is prebatched TFRecords. The argument to the script is the prebatch size (4096 is the default). 6. Start training. Single GPU: ```bash python -m trainer.task --gpu ``` 8 GPUs: ```bash mpiexec --allow-run-as-root --bind-to socket -np 8 python -m trainer.task --gpu --hvd ``` If you want to run validation or inference, you can either use the checkpoint obtained from the training commands above, or download the pretrained checkpoint from NGC. In order to download the checkpoint from NGC, visit the [ngc.nvidia.com](https://ngc.nvidia.com) website and browse the available models. Download the checkpoint files and unzip them to some path, e.g. to `/raid/outbrain/checkpoints/` (which is the default path for storing the checkpoints during training). 7. Start validation/evaluation. In order to validate the checkpoint on the evaluation set, run the `task.py` script with the `--evaluate` flag: ```bash python -m trainer.task --gpu --evaluate --model_dir /outbrain/checkpoints ``` 8. Start inference/predictions. In order to run inference and predict the results, run the `task.py` script with the `--predict` flag: ```bash python -m trainer.task --gpu --predict --model_dir /outbrain/checkpoints ``` ## Advanced The following sections provide greater details of the dataset, running training, and the training results. ### Scripts and sample code These are the important scripts in this repository: * `trainer/task.py` - Python script for training the Wide & Deep recommender model * `trainer/features.py` - Python file describing the request and item level features ### Parameters These are the important parameters in the `trainer/task.py` script: ``` --model_dir: Path to model checkpoint directory --deep_hidden_units: [DEEP_LAYER1 DEEP_LAYER2 ...] hidden units per layer, separated by spaces --prebatch_size: Number of samples in each pre-batch in tfrecords --global_batch_size: Training batch size (across all GPUs, must be a multiple of prebatch_size) --eval_batch_size: Evaluation batch size (must be a multiple of prebatch_size) --num_epochs: Number of epochs to train --linear_learning_rate: Learning rate for the wide part of the model --linear_l1_regularization: L1 regularization for the wide part of the model --linear_l2_regularization: L2 regularization for the wide part of the model --deep_learning_rate: Learning rate for the deep part of the model --deep_dropout: Dropout probability for deep model --deep_warmup_epochs: Number of epochs with linear learning rate warmup --predict: Perform only the prediction on the validation set, do not train --evaluate: Perform only the evaluation on the validation set, do not train --gpu: Run computations on GPU --amp: Enable Automatic Mixed Precision --xla: Enable XLA --hvd: Use Horovod for multi-GPU training --eval_epoch_interval: Perform evaluation every this many epochs ``` ### Command-line options To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option: ```bash python -m trainer.task --help ``` ### Getting the data The Outbrain dataset can be downloaded from [Kaggle](https://www.kaggle.com/c/outbrain-click-prediction/data) (requires a Kaggle account). #### Dataset guidelines The dataset contains a sample of users’ page views and clicks, as observed on multiple publisher sites. Viewed pages and clicked recommendations have additional semantic attributes of the documents.
The dataset contains sets of content recommendations served to a specific user in a specific context. Each context (i.e. a set of recommended ads) is given a `display_id`. In each such recommendation set, the user has clicked on exactly one of the ads. The original data is stored in several separate files: - `page_views.csv` - log of users visiting documents (2B rows, ~100GB uncompressed) - `clicks_train.csv` - data showing which ad was clicked in each recommendation set (87M rows) - `clicks_test.csv` - used only for the submission in the original Kaggle contest - `events.csv` - metadata about the context of each recommendation set (23M rows) - `promoted_content.csv` - metadata about the ads - `document_meta.csv`, `document_topics.csv`, `document_entities.csv`, `document_categories.csv` - metadata about the documents During the preprocessing stage the data is transformed into 59M rows of tabular data with 54 features and eventually saved in pre-batched TFRecord format. #### Spark preprocessing The original dataset is preprocessed using Spark scripts from the `preproc` directory. The preprocessing consists of the following operations: - separating out the validation set for cross-validation - filling missing data with the most frequent value - generating the user profiles from the page views data - joining the tables for the ad clicks data - computing click-through rates (CTR) for ads grouped by different contexts - computing cosine similarity between the features of the clicked ads and the viewed ads - math transformations of the numeric features (taking logarithm, scaling, binning) - storing the resulting set of features in TFRecord format The `preproc1-4.py` preprocessing scripts use PySpark. In the Docker image, Spark 2.3.1 is installed as a standalone Spark cluster. The `preproc1.py` script splits the data into a training set and a validation set. The `preproc2.py` script generates the user profiles from the page views data. The `preproc3.py` script computes the click-through rates (CTR) and cosine similarities between the features. The `preproc4.py` script performs the math transformations and generates the final TFRecord files. The data in the output files is pre-batched (with the default batch size of 4096) to avoid the overhead of the TFRecord format, which is otherwise not well suited to tabular data - it stores a separate dictionary with each feature name in plain text for every data entry. The preprocessing includes some very resource-intensive operations, such as joining tables with 2B+ rows. Such operations may not fit into RAM; therefore, we use Spark, which is a suitable tool for handling tabular operations on large data. Note that the Spark job requires about 1 TB disk space and 500 GB RAM to perform the preprocessing. For more information about Spark, please refer to the [Spark documentation](https://spark.apache.org/docs/2.3.1/). ### Training process The training can be started by running the `trainer/task.py` script. By default the script is in train mode. Other training-related options are also present in `trainer/task.py` and can be seen using the command `python -m trainer.task --help`. Training happens for `--num_epochs` epochs with a DNNLinearCombinedClassifier estimator for the model. The model has a wide linear part and a deep feed-forward network, and the networks are built according to the default configuration.
Two separate optimizers are used to optimize the wide and the deep part of the network: - FTRL (Follow The Regularized Leader) optimizer is used to optimize the wide part of the network. - Adagrad optimizer is used to optimize the deep part of the network. (A minimal sketch of this estimator wiring is shown after this README.) The training log will contain information about: - Loss value after every 100 steps. - Training throughput if `--benchmark` option is selected. - Evaluation metrics after every `--eval_epoch_interval` epochs. Checkpoints are stored with every evaluation at the `--model_dir` location. ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Benchmarking The following section shows how to run benchmarks measuring the model performance in training mode. #### Training performance benchmark We provide 8 scripts to benchmark the performance of training: ```bash bash scripts/DGXA100_benchmark_training_tf32_1gpu.sh bash scripts/DGXA100_benchmark_training_amp_1gpu.sh bash scripts/DGXA100_benchmark_training_tf32_8gpu.sh bash scripts/DGXA100_benchmark_training_amp_8gpu.sh bash scripts/DGX1_benchmark_training_fp32_1gpu.sh bash scripts/DGX1_benchmark_training_amp_1gpu.sh bash scripts/DGX1_benchmark_training_fp32_8gpu.sh bash scripts/DGX1_benchmark_training_amp_8gpu.sh ``` ### Results The following sections provide details on how we achieved our performance and accuracy in training. #### Training accuracy results ##### Training accuracy: NVIDIA DGX A100 (8x A100 40GB) Our results were obtained by running the `trainer/task.py` training script in the TensorFlow NGC container on NVIDIA DGX A100 with (8x A100 40GB) GPUs. |**GPUs**|**Batch size / GPU**|**Accuracy - TF32 (MAP@12)**|**Accuracy - mixed precision (MAP@12)**|**Time to train - TF32 (minutes)**|**Time to train - mixed precision (minutes)**|**Time to train speedup (TF32 to mixed precision)**| |-------:|-------------------:|----------------------------:|---------------------------------------:|-----------------------------------------------:|----------------------:|---------------------------------:| | 1 | 131,072 | 0.67683 | 0.67632 | 341 | 359 | [-](#known-issues) | | 8 | 16,384 | 0.67709 | 0.67721 | 93 | 107 | [-](#known-issues) | To achieve the same results, follow the steps in the [Quick Start Guide](#quick-start-guide). ##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB) Our results were obtained by running the `trainer/task.py` training script in the TensorFlow NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs. |**GPUs**|**Batch size / GPU**|**Accuracy - FP32 (MAP@12)**|**Accuracy - mixed precision (MAP@12)**|**Time to train - FP32 (minutes)**|**Time to train - mixed precision (minutes)**|**Time to train speedup (FP32 to mixed precision)**| |-------:|-------------------:|----------------------------:|---------------------------------------:|-----------------------------------------------:|----------------------:|---------------------------------:| | 1 | 131,072 | 0.67648 | 0.67744 | 654 | 440 | 1.49 | | 8 | 16,384 | 0.67692 | 0.67725 | 190 | 185 | 1.03 | To achieve the same results, follow the steps in the [Quick Start Guide](#quick-start-guide). ##### Training accuracy plots Models trained with FP32, TF32 and Automatic Mixed Precision (AMP) achieve similar precision.
![MAP12](img/lc20.06.png) ##### Training stability test The Wide and Deep model was trained for 54,713 training steps, starting from 6 different initial random seeds for each setup. The training was performed in the 20.10-tf1-py3 NGC container on NVIDIA DGX A100 40GB and DGX-1 16GB machines with and without mixed precision enabled. After training, the models were evaluated on the validation set. The following table summarizes the final MAP@12 score on the validation set. ||**Average MAP@12**|**Standard deviation**|**Minimum**|**Maximum**| |:-------|-----------------:|---------------------:|----------:|----------:| | DGX A100 TF32 | 0.67709 | 0.00094 | 0.67463 | 0.67813 | | DGX A100 mixed precision | 0.67721 | 0.00048 | 0.67643 | 0.67783 | | DGX-1 FP32 | 0.67692 | 0.00060 | 0.67587 | 0.67791 | | DGX-1 mixed precision | 0.67725 | 0.00064 | 0.67561 | 0.67803 | #### Training performance results ##### Training performance: NVIDIA DGX A100 (8x A100 40GB) Our results were obtained by running the benchmark scripts from the `scripts` directory in the TensorFlow NGC container on NVIDIA DGX A100 with (8x A100 40GB) GPUs. Improving model scaling for multi-GPU is [under development](#known-issues). |**GPUs**|**Batch size / GPU**|**Throughput - TF32 (samples/s)**|**Throughput - mixed precision (samples/s)**|**Strong scaling - TF32**|**Strong scaling - mixed precision**| |-------:|-------------------:|----------------------------:|---------------------------------------:|----------------------:|---------------------------------:| | 1 | 131,072 | 349,879 | 332,529 | 1.00 | 1.00 | | 8 | 16,384 | 1,283,457 | 1,111,976 | 3.67 | 3.34 | ##### Training performance: NVIDIA DGX-1 (8x V100 16GB) Our results were obtained by running the benchmark scripts from the `scripts` directory in the TensorFlow NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs. Improving model scaling for multi-GPU is [under development](#known-issues). |**GPUs**|**Batch size / GPU**|**Throughput - FP32 (samples/s)**|**Throughput - mixed precision (samples/s)**|**Throughput speedup (FP32 to mixed precision)**|**Strong scaling - FP32**|**Strong scaling - mixed precision**| |-------:|-------------------:|----------------------------:|---------------------------------------:|-----------------------------------------------:|----------------------:|---------------------------------:| | 1 | 131,072 | 182,510 | 271,366 | 1.49 | 1.00 | 1.00 | | 8 | 16,384 | 626,301 | 643,334 | 1.03 | 3.43 | 2.37 | ## Release notes ### Changelog April 2023 - Ceased maintenance of this model in TensorFlow1 November 2020 - Updated performance tables to include numbers from 20.10-tf1-py3 NGC container June 2020 - Updated performance tables to include A100 results April 2020 - Improved Spark preprocessing scripts performance March 2020 - Initial release ### Known issues - Limited tf.feature_column support - Limited scaling for multi-GPU because of inefficient handling of embedding operations (multiple memory transfers between CPU and GPU), work in progress to cover all the operations on GPU. - In this model the TF32 precision can in some cases be as fast as the FP16 precision on Ampere GPUs. This is because TF32 also uses Tensor Cores and doesn't need any additional logic such as maintaining FP32 master weights and casts. However, please note that W&D is, by modern recommender standards, a very small model. Larger models should still see significant benefits of using FP16 math.
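The Training process section above describes a `DNNLinearCombinedClassifier` with an FTRL optimizer for the wide part and an Adagrad optimizer for the deep part, five hidden layers of 1024 units each. The snippet below is a minimal sketch of that wiring, not the repository's actual `trainer/task.py`; the feature columns, learning rates, and paths are placeholders.

```python
# Minimal sketch of the estimator setup described in "Training process";
# feature columns and hyperparameters are placeholders, not the values used
# by trainer/task.py.
import tensorflow as tf

wide_columns = [
    tf.feature_column.categorical_column_with_hash_bucket(
        'ad_id', hash_bucket_size=250000)  # hypothetical feature
]
deep_columns = [
    tf.feature_column.embedding_column(wide_columns[0], dimension=64),
    tf.feature_column.numeric_column('doc_ctr'),  # hypothetical feature
]

estimator = tf.estimator.DNNLinearCombinedClassifier(
    model_dir='/outbrain/checkpoints',
    linear_feature_columns=wide_columns,
    linear_optimizer=tf.train.FtrlOptimizer(learning_rate=0.2),
    dnn_feature_columns=deep_columns,
    dnn_optimizer=tf.train.AdagradOptimizer(learning_rate=0.05),
    dnn_hidden_units=[1024, 1024, 1024, 1024, 1024],
    dnn_dropout=0.0)
```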
TensorFlow/Translation/GNMT/scripts/docker
docker
build
#!/bin/bash docker build . --rm -t gnmt_tf
PaddlePaddle/Classification/RN50v1.5/utils
utils
misc
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __all__ = ['AverageMeter'] class AverageMeter: """ A container to keep running sum, mean and last value. """ def __init__(self, name='', fmt='f', postfix="", need_avg=True): self.name = name self.fmt = fmt self.postfix = postfix self.need_avg = need_avg self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count @property def total(self): return '{self.sum:{self.fmt}}{self.postfix}'.format(self=self)
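A short usage sketch of `AverageMeter` follows (not part of the repository); it assumes the class above is in scope, and the loss values and batch size are made up.

```python
# Example usage of AverageMeter; the loss values below are illustrative only.
loss_meter = AverageMeter('loss', fmt='.4f', need_avg=True)
loss_meter.reset()

for step, loss in enumerate([0.92, 0.85, 0.80], start=1):
    batch_size = 256
    # Record the last value and accumulate a batch-size-weighted running mean.
    loss_meter.update(loss, n=batch_size)
    print(f'step {step}: last={loss_meter.val:.4f} avg={loss_meter.avg:.4f}')

print('running sum:', loss_meter.total)  # formatted via the `total` property
```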
TensorFlow/Detection/SSD/models/research/object_detection
object_detection
eval_util_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for eval_util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import tensorflow as tf from object_detection import eval_util from object_detection.core import standard_fields as fields from object_detection.protos import eval_pb2 from object_detection.utils import test_case class EvalUtilTest(test_case.TestCase, parameterized.TestCase): def _get_categories_list(self): return [{'id': 0, 'name': 'person'}, {'id': 1, 'name': 'dog'}, {'id': 2, 'name': 'cat'}] def _make_evaluation_dict(self, resized_groundtruth_masks=False, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): input_data_fields = fields.InputDataFields detection_fields = fields.DetectionResultFields image = tf.zeros(shape=[batch_size, 20, 20, 3], dtype=tf.uint8) if batch_size == 1: key = tf.constant('image1') else: key = tf.constant([str(i) for i in range(batch_size)]) detection_boxes = tf.tile(tf.constant([[[0., 0., 1., 1.]]]), multiples=[batch_size, 1, 1]) detection_scores = tf.tile(tf.constant([[0.8]]), multiples=[batch_size, 1]) detection_classes = tf.tile(tf.constant([[0]]), multiples=[batch_size, 1]) detection_masks = tf.tile(tf.ones(shape=[1, 1, 20, 20], dtype=tf.float32), multiples=[batch_size, 1, 1, 1]) num_detections = tf.ones([batch_size]) groundtruth_boxes = tf.constant([[0., 0., 1., 1.]]) groundtruth_classes = tf.constant([1]) groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8) if resized_groundtruth_masks: groundtruth_instance_masks = tf.ones(shape=[1, 10, 10], dtype=tf.uint8) if batch_size > 1: groundtruth_boxes = tf.tile(tf.expand_dims(groundtruth_boxes, 0), multiples=[batch_size, 1, 1]) groundtruth_classes = tf.tile(tf.expand_dims(groundtruth_classes, 0), multiples=[batch_size, 1]) groundtruth_instance_masks = tf.tile( tf.expand_dims(groundtruth_instance_masks, 0), multiples=[batch_size, 1, 1, 1]) detections = { detection_fields.detection_boxes: detection_boxes, detection_fields.detection_scores: detection_scores, detection_fields.detection_classes: detection_classes, detection_fields.detection_masks: detection_masks, detection_fields.num_detections: num_detections } groundtruth = { input_data_fields.groundtruth_boxes: groundtruth_boxes, input_data_fields.groundtruth_classes: groundtruth_classes, input_data_fields.groundtruth_instance_masks: groundtruth_instance_masks } if batch_size > 1: return eval_util.result_dict_for_batched_example( image, key, detections, groundtruth, scale_to_absolute=scale_to_absolute, max_gt_boxes=max_gt_boxes) else: return eval_util.result_dict_for_single_example( image, key, detections, groundtruth, scale_to_absolute=scale_to_absolute) @parameterized.parameters( {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, {'batch_size': 8, 'max_gt_boxes': [1], 
'scale_to_absolute': True}, {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} ) def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend(['coco_detection_metrics']) categories = self._get_categories_list() eval_dict = self._make_evaluation_dict(batch_size=batch_size, max_gt_boxes=max_gt_boxes, scale_to_absolute=scale_to_absolute) metric_ops = eval_util.get_eval_metric_ops_for_evaluators( eval_config, categories, eval_dict) _, update_op = metric_ops['DetectionBoxes_Precision/mAP'] with self.test_session() as sess: metrics = {} for key, (value_op, _) in metric_ops.iteritems(): metrics[key] = value_op sess.run(update_op) metrics = sess.run(metrics) self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) self.assertNotIn('DetectionMasks_Precision/mAP', metrics) @parameterized.parameters( {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} ) def test_get_eval_metric_ops_for_coco_detections_and_masks( self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend( ['coco_detection_metrics', 'coco_mask_metrics']) categories = self._get_categories_list() eval_dict = self._make_evaluation_dict(batch_size=batch_size, max_gt_boxes=max_gt_boxes, scale_to_absolute=scale_to_absolute) metric_ops = eval_util.get_eval_metric_ops_for_evaluators( eval_config, categories, eval_dict) _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP'] _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP'] with self.test_session() as sess: metrics = {} for key, (value_op, _) in metric_ops.iteritems(): metrics[key] = value_op sess.run(update_op_boxes) sess.run(update_op_masks) metrics = sess.run(metrics) self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP']) @parameterized.parameters( {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True}, {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False}, {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False} ) def test_get_eval_metric_ops_for_coco_detections_and_resized_masks( self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend( ['coco_detection_metrics', 'coco_mask_metrics']) categories = self._get_categories_list() eval_dict = self._make_evaluation_dict(batch_size=batch_size, max_gt_boxes=max_gt_boxes, scale_to_absolute=scale_to_absolute, resized_groundtruth_masks=True) metric_ops = eval_util.get_eval_metric_ops_for_evaluators( eval_config, categories, eval_dict) _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP'] _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP'] with self.test_session() as sess: metrics = {} for key, (value_op, _) in metric_ops.iteritems(): metrics[key] = value_op sess.run(update_op_boxes) sess.run(update_op_masks) metrics = sess.run(metrics) self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP']) self.assertAlmostEqual(1.0, 
metrics['DetectionMasks_Precision/mAP']) def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend(['unsupported_metric']) categories = self._get_categories_list() eval_dict = self._make_evaluation_dict() with self.assertRaises(ValueError): eval_util.get_eval_metric_ops_for_evaluators( eval_config, categories, eval_dict) def test_get_eval_metric_ops_for_evaluators(self): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend( ['coco_detection_metrics', 'coco_mask_metrics']) eval_config.include_metrics_per_category = True evaluator_options = eval_util.evaluator_options_from_eval_config( eval_config) self.assertTrue(evaluator_options['coco_detection_metrics'][ 'include_metrics_per_category']) self.assertTrue(evaluator_options['coco_mask_metrics'][ 'include_metrics_per_category']) def test_get_evaluator_with_evaluator_options(self): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend(['coco_detection_metrics']) eval_config.include_metrics_per_category = True categories = self._get_categories_list() evaluator_options = eval_util.evaluator_options_from_eval_config( eval_config) evaluator = eval_util.get_evaluators( eval_config, categories, evaluator_options) self.assertTrue(evaluator[0]._include_metrics_per_category) def test_get_evaluator_with_no_evaluator_options(self): eval_config = eval_pb2.EvalConfig() eval_config.metrics_set.extend(['coco_detection_metrics']) eval_config.include_metrics_per_category = True categories = self._get_categories_list() evaluator = eval_util.get_evaluators( eval_config, categories, evaluator_options=None) # Even though we are setting eval_config.include_metrics_per_category = True # this option is never passed into the DetectionEvaluator constructor (via # `evaluator_options`). self.assertFalse(evaluator[0]._include_metrics_per_category) if __name__ == '__main__': tf.test.main()
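A note for readers running the test above under Python 3: the metric-collection loops call metric_ops.iteritems(), which exists only on Python 2 dictionaries, so they would need dict.items() instead. The (value_op, update_op) pairs those loops consume follow the standard TF1 streaming-metric contract; a minimal, self-contained sketch of that contract, with tf.metrics.mean standing in for the COCO metric ops, is shown below.

import tensorflow as tf

# Stand-in for one of the (value_op, update_op) pairs returned by
# eval_util.get_eval_metric_ops_for_evaluators(); tf.metrics.mean is used
# here purely for illustration.
value_op, update_op = tf.metrics.mean(tf.constant([1.0, 0.0, 1.0]))

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric state lives in local variables
    sess.run(update_op)                         # accumulate statistics
    print(sess.run(value_op))                   # read the aggregated value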
TensorFlow/Detection/SSD/examples
examples
SSD320_FP16_1GPU_BENCHMARK
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

CKPT_DIR=${1:-"/results/SSD320_FP16_1GPU"}
PIPELINE_CONFIG_PATH=${2:-"/workdir/models/research/configs"}"/ssd320_bench.config"
GPUS=1
TENSOR_OPS=0

export TF_ENABLE_CUBLAS_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}
export TF_ENABLE_CUDNN_RNN_TENSOR_OP_MATH_FP32=${TENSOR_OPS}

TRAIN_LOG=$(python -u ./object_detection/model_main.py \
        --pipeline_config_path=${PIPELINE_CONFIG_PATH} \
        --model_dir=${CKPT_DIR} \
        --alsologtostderr \
        --amp \
        "${@:3}" 2>&1)

PERF=$(echo "$TRAIN_LOG" | sed -n 's|.*global_step/sec: \(\S\+\).*|\1|p' | python -c "import sys; x = sys.stdin.readlines(); x = [float(a) for a in x[int(len(x)*3/4):]]; print(32*$GPUS*sum(x)/len(x), 'img/s')")

mkdir -p $CKPT_DIR
echo "Single GPU mixed precision training performance: $PERF" | tee $CKPT_DIR/train_log
echo "$TRAIN_LOG" >> $CKPT_DIR/train_log
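The PERF line above packs the whole throughput calculation into a sed | python -c one-liner. The equivalent logic, spelled out as a small standalone sketch with made-up global_step/sec samples (the constant 32 mirrors the per-GPU batch size baked into the script's formula), is:

# Illustrative post-processing of the training log: keep only the last
# quarter of the global_step/sec readings (to skip warm-up) and convert
# steps/s to images/s. The sample values below are invented.
steps_per_sec = [2.1, 2.4, 2.5, 2.5, 2.6, 2.6, 2.6, 2.6]
gpus = 1
tail = steps_per_sec[int(len(steps_per_sec) * 3 / 4):]
throughput = 32 * gpus * sum(tail) / len(tail)
print(f"{throughput} img/s")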
TensorFlow/Recommendation/VAE-CF
VAE-CF
main
#!/usr/bin/python3 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from functools import partial import json import logging from argparse import ArgumentParser import tensorflow as tf tf.logging.set_verbosity(tf.logging.ERROR) import numpy as np import horovod.tensorflow as hvd from mpi4py import MPI import dllogger import time from vae.utils.round import round_8 from vae.metrics.recall import recall from vae.metrics.ndcg import ndcg from vae.models.train import VAE from vae.load.preprocessing import load_and_parse_ML_20M def main(): hvd.init() mpi_comm = MPI.COMM_WORLD parser = ArgumentParser(description="Train a Variational Autoencoder for Collaborative Filtering in TensorFlow") parser.add_argument('--train', action='store_true', help='Run training of VAE') parser.add_argument('--test', action='store_true', help='Run validation of VAE') parser.add_argument('--inference_benchmark', action='store_true', help='Measure inference latency and throughput on a variety of batch sizes') parser.add_argument('--amp', action='store_true', default=False, help='Enable Automatic Mixed Precision') parser.add_argument('--epochs', type=int, default=400, help='Number of epochs to train') parser.add_argument('--batch_size_train', type=int, default=24576, help='Global batch size for training') parser.add_argument('--batch_size_validation', type=int, default=10000, help='Used both for validation and testing') parser.add_argument('--validation_step', type=int, default=50, help='Train epochs for one validation') parser.add_argument('--warm_up_epochs', type=int, default=5, help='Number of epochs to omit during benchmark') parser.add_argument('--total_anneal_steps', type=int, default=15000, help='Number of annealing steps') parser.add_argument('--anneal_cap', type=float, default=0.1, help='Annealing cap') parser.add_argument('--lam', type=float, default=1.00, help='Regularization parameter') parser.add_argument('--lr', type=float, default=0.004, help='Learning rate') parser.add_argument('--beta1', type=float, default=0.90, help='Adam beta1') parser.add_argument('--beta2', type=float, default=0.90, help='Adam beta2') parser.add_argument('--top_results', type=int, default=100, help='Number of results to be recommended') parser.add_argument('--xla', action='store_true', default=False, help='Enable XLA') parser.add_argument('--trace', action='store_true', default=False, help='Save profiling traces') parser.add_argument('--activation', type=str, default='tanh', help='Activation function') parser.add_argument('--log_path', type=str, default='./vae_cf.log', help='Path to the detailed training log to be created') parser.add_argument('--seed', type=int, default=0, help='Random seed for TensorFlow and numpy') parser.add_argument('--data_dir', default='/data', type=str, help='Directory for storing the training data') parser.add_argument('--checkpoint_dir', type=str, default=None, help='Path for saving a checkpoint after the 
training') args = parser.parse_args() args.world_size = hvd.size() if args.batch_size_train % hvd.size() != 0: raise ValueError('Global batch size should be a multiple of the number of workers') args.local_batch_size = args.batch_size_train // hvd.size() logger = logging.getLogger("VAE") if hvd.rank() == 0: logger.setLevel(logging.INFO) dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=args.log_path), dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)]) else: dllogger.init(backends=[]) logger.setLevel(logging.ERROR) dllogger.metadata("final_ndcg@100", {"unit": None}) dllogger.metadata("mean_inference_throughput", {"unit": "samples/s"}) dllogger.metadata("mean_training_throughput", {"unit": "samples/s"}) if args.seed is None: if hvd.rank() == 0: seed = int(time.time()) else: seed = None seed = mpi_comm.bcast(seed, root=0) else: seed = args.seed tf.random.set_random_seed(seed) np.random.seed(seed) args.seed = seed dllogger.log(data=vars(args), step='PARAMETER') # Suppress TF warnings os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # set AMP os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1' if args.amp else '0' # load dataset (train_data, validation_data_input, validation_data_true, test_data_input, test_data_true) = load_and_parse_ML_20M(args.data_dir) # make sure all dims and sizes are divisible by 8 number_of_train_users, number_of_items = train_data.shape number_of_items = round_8(number_of_items) for data in [train_data, validation_data_input, validation_data_true, test_data_input, test_data_true]: number_of_users, _ = data.shape data.resize(number_of_users, number_of_items) number_of_users, number_of_items = train_data.shape encoder_dims = [number_of_items, 600, 200] vae = VAE(train_data, encoder_dims, total_anneal_steps=args.total_anneal_steps, anneal_cap=args.anneal_cap, batch_size_train=args.local_batch_size, batch_size_validation=args.batch_size_validation, lam=args.lam, lr=args.lr, beta1=args.beta1, beta2=args.beta2, activation=args.activation, xla=args.xla, checkpoint_dir=args.checkpoint_dir, trace=args.trace, top_results=args.top_results) metrics = {'ndcg@100': partial(ndcg, R=100), 'recall@20': partial(recall, R=20), 'recall@50': partial(recall, R=50)} if args.train: vae.train(n_epochs=args.epochs, validation_data_input=validation_data_input, validation_data_true=validation_data_true, metrics=metrics, validation_step=args.validation_step) if args.test and hvd.size() <= 1: test_results = vae.test(test_data_input=test_data_input, test_data_true=test_data_true, metrics=metrics) for k, v in test_results.items(): print("{}:\t{}".format(k, v)) elif args.test and hvd.size() > 1: print("Testing is not supported with horovod multigpu yet") elif args.test and hvd.size() > 1: print("Testing is not supported with horovod multigpu yet") if args.inference_benchmark: items_per_user = 10 item_indices = np.random.randint(low=0, high=10000, size=items_per_user) user_indices = np.zeros(len(item_indices)) indices = np.stack([user_indices, item_indices], axis=1) num_batches = 200 latencies = [] for i in range(num_batches): start_time = time.time() _ = vae.query(indices=indices) end_time = time.time() if i < 10: #warmup steps continue latencies.append(end_time - start_time) result_data = {} result_data[f'batch_1_mean_throughput'] = 1 / np.mean(latencies) result_data[f'batch_1_mean_latency'] = np.mean(latencies) result_data[f'batch_1_p90_latency'] = np.percentile(latencies, 90) result_data[f'batch_1_p95_latency'] = np.percentile(latencies, 95) 
result_data[f'batch_1_p99_latency'] = np.percentile(latencies, 99) dllogger.log(data=result_data, step=tuple()) vae.close_session() dllogger.flush() if __name__ == '__main__': main()
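The main() above pads the item dimension with round_8 so that every matrix size fed to the encoder is divisible by 8 (a Tensor Core friendly layout). A minimal sketch of that padding step follows; round_8 is re-implemented here only for illustration and may differ from the real vae.utils.round.round_8 helper.

import numpy as np
import scipy.sparse as sp

def round_8(n):
    # Illustrative stand-in: round n up to the nearest multiple of 8.
    return ((n + 7) // 8) * 8

train_data = sp.random(100, 20108, density=0.01, format='csr')  # toy user-item matrix
num_users, num_items = train_data.shape
padded_items = round_8(num_items)            # 20112
train_data.resize(num_users, padded_items)   # in-place pad with implicit zeros
print(train_data.shape)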
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
ssd_mobilenet_v1_focal_loss_pets
# SSD with Mobilenet v1, configured for Oxford-IIIT Pets Dataset. # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. # TPU-compatible model { ssd { num_classes: 37 box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { num_layers: 6 min_scale: 0.2 max_scale: 0.95 aspect_ratios: 1.0 aspect_ratios: 2.0 aspect_ratios: 0.5 aspect_ratios: 3.0 aspect_ratios: 0.3333 } } image_resizer { fixed_shape_resizer { height: 300 width: 300 } } box_predictor { convolutional_box_predictor { min_depth: 0 max_depth: 0 num_layers_before_predictor: 0 use_dropout: false dropout_keep_probability: 0.8 kernel_size: 1 box_code_size: 4 apply_sigmoid_to_scores: false conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.9997, epsilon: 0.001, } } } } feature_extractor { type: 'ssd_mobilenet_v1' min_depth: 16 depth_multiplier: 1.0 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.9997, epsilon: 0.001, } } } loss { classification_loss { weighted_sigmoid_focal { alpha: 0.75 gamma: 2.0 } } localization_loss { weighted_smooth_l1 { } } classification_weight: 1.0 localization_weight: 1.0 } normalize_loss_by_num_matches: true post_processing { batch_non_max_suppression { score_threshold: 1e-8 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 100 } score_converter: SIGMOID } } } train_config: { batch_size: 24 optimizer { rms_prop_optimizer: { learning_rate: { exponential_decay_learning_rate { initial_learning_rate: 0.004 decay_steps: 800720 decay_factor: 0.95 } } momentum_optimizer_value: 0.9 decay: 0.9 epsilon: 1.0 } } fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" from_detection_checkpoint: true load_all_detection_checkpoint_vars: true # Note: The below line limits the training process to 200K steps, which we # empirically found to be sufficient enough to train the pets dataset. This # effectively bypasses the learning rate schedule (the learning rate will # never decay). Remove the below line to train indefinitely. num_steps: 200000 data_augmentation_options { random_horizontal_flip { } } data_augmentation_options { ssd_random_crop { } } max_number_of_boxes: 50 unpad_groundtruth_tensors: false } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/pet_faces_train.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt" } eval_config: { metrics_set: "coco_detection_metrics" num_examples: 1101 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/pet_faces_val.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt" shuffle: false num_readers: 1 }
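A hedged sketch of how the PATH_TO_BE_CONFIGURED placeholders in a pipeline config like the one above can be filled in programmatically, assuming the config_util helpers that ship with the TensorFlow Object Detection API; every path below is a placeholder.

from object_detection.utils import config_util

configs = config_util.get_configs_from_pipeline_file(
    'configs/ssd_mobilenet_v1_focal_loss_pets.config')

# Override the placeholder fields (paths are illustrative only).
configs['train_config'].fine_tune_checkpoint = '/checkpoints/model.ckpt'
configs['train_input_config'].label_map_path = '/data/pet_label_map.pbtxt'
configs['train_input_config'].tf_record_input_reader.input_path[0] = (
    '/data/pet_faces_train.record-?????-of-00010')

pipeline_proto = config_util.create_pipeline_proto_from_configs(configs)
config_util.save_pipeline_config(pipeline_proto, '/results')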
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf
tf
deploy_ensemble
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # author: Tomasz Grel (tgrel@nvidia.com) import os _config_template = ''' name: "{ensemble_name}" platform: "ensemble" max_batch_size: {max_batch_size} input [ {{ name: "categorical_features" data_type: TYPE_INT32 dims: [{num_cat_features}] }}, {{ name: "numerical_features" data_type: TYPE_FP32 dims: [{num_numerical_features}] }} ] output [ {{ name: "DENSE_OUTPUT" data_type: TYPE_FP32 dims: [1] }} ] ensemble_scheduling {{ step [ {{ model_name: "{sparse_model_name}" model_version: -1 input_map {{ key: "input_1" value: "categorical_features" }}, output_map {{ key: "output_1" value: "LOOKUP_VECTORS" }} }}, {{ model_name: "{dense_model_name}" model_version: -1 input_map {{ key: "args_1" value: "LOOKUP_VECTORS" }}, input_map {{ key: "args_0" value: "numerical_features" }}, output_map {{ key: "output_1" value: "DENSE_OUTPUT" }} }} ] }} ''' def deploy_ensemble(dst, model_name, sparse_model_name, dense_model_name, num_cat_features, num_numerical_features, max_batch_size, version): config_str = _config_template.format( ensemble_name=model_name, dense_model_name=dense_model_name, sparse_model_name=sparse_model_name, num_cat_features=num_cat_features, num_numerical_features=num_numerical_features, max_batch_size=max_batch_size ) with open(os.path.join(dst, "config.pbtxt"), "w") as f: f.write(config_str) os.mkdir(os.path.join(dst, str(version))) print("Ensemble configuration:") print(config_str)
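An illustrative call to deploy_ensemble() as defined above. The model repository path, model names, batch size and version are placeholders; the 26 categorical / 13 numerical feature counts follow the Criteo-style layout these models commonly use.

import os

# Assuming deploy_ensemble from the module above is importable.
repo = '/models/dlrm_ensemble'           # placeholder Triton model-repository entry
os.makedirs(repo, exist_ok=True)

deploy_ensemble(
    dst=repo,
    model_name='dlrm_ensemble',
    sparse_model_name='dlrm_sparse',
    dense_model_name='dlrm_dense',
    num_cat_features=26,
    num_numerical_features=13,
    max_batch_size=65536,
    version=1,
)
# Produces <repo>/config.pbtxt plus an empty <repo>/1/ version directory.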
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/bert
bert
preprocessing
# coding=utf-8 # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import json import numpy as np import collections from utils.bert.tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize) class SquadExample(object): """ A single training/test example for the Squad dataset. For examples without an answer, the start and end position are -1. """ def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=None): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (self.qas_id) s += ", question_text: %s" % ( self.question_text) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.end_position: s += ", end_position: %d" % (self.end_position) if self.is_impossible: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_example(question_text, context, version_2_with_negative): """ reads a question and a context, and turns it into a SquadExample """ # def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False # doc_tokens = [] prev_is_whitespace = True for c in context: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False # example = SquadExample( qas_id=0, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=False ) return example def convert_example_to_feature(example, tokenizer, max_seq_length, doc_stride, max_query_length): """ converts an example into a feature """ unique_id = 1000000000 examples = [example] features = [] for (example_index, example) in enumerate(examples): query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = 
query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length start_position = None end_position = None features.append( InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible)) unique_id += 1 assert len(features) == 1, "too large input" return features[0] def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def get_predictions(example, feature, start_logits, end_logits, n_best_size, max_answer_length, do_lower_case, version_2_with_negative, null_score_diff_threshold): """Write final predictions to the json file and log-odds of null if needed.""" all_examples = [example] all_features = [feature] all_results = [RawResult(unique_id=1000000000,start_logits=start_logits,end_logits=end_logits)] example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min mull score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_indices_of_largest_logits(result.start_logits) end_indexes = _get_indices_of_largest_logits(result.end_logits) # if we could have irrelevant answers, get the min score of irrelevant if version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the 
start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest) == 1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text all_nbest_json[example.qas_id] = nbest_json return nbest_json def get_final_text(pred_text, orig_text, do_lower_case): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heruistic between # `pred_text` and `orig_text` to get a character-to-charcter alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. 
tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs def _get_indices_of_largest_logits(logits): """ sort logits and return the indices of the sorted array """ indices_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) indices = map(lambda x: x[0], indices_and_score) indices = list(indices) return indices
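For readers wiring these helpers together, below is a hedged end-to-end sketch for a single question/context pair. The vocabulary path and the start/end logits are placeholders: in the actual demo the logits come from the served BERT QA model and the tokenizer is built from the deployed vocabulary file.

from utils.bert.preprocessing import (read_squad_example,
                                      convert_example_to_feature,
                                      get_predictions)
from utils.bert.tokenization import BertTokenizer

tokenizer = BertTokenizer('vocab.txt', do_lower_case=True)  # vocab path is a placeholder

example = read_squad_example(
    question_text='Who wrote the play?',
    context='The play was written by William Shakespeare.',
    version_2_with_negative=False)

feature = convert_example_to_feature(
    example, tokenizer, max_seq_length=384, doc_stride=128, max_query_length=64)

# feature.input_ids / input_mask / segment_ids are what gets sent to the model.
# Dummy logits stand in for the model output here, one value per input position:
start_logits = [0.0] * len(feature.input_ids)
end_logits = [0.0] * len(feature.input_ids)

nbest = get_predictions(
    example, feature, start_logits, end_logits,
    n_best_size=5, max_answer_length=30, do_lower_case=True,
    version_2_with_negative=False, null_score_diff_threshold=0.0)
print(nbest[0]['text'])  # with real logits this is the extracted answer span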
TensorFlow2/Detection/Efficientdet/object_detection
object_detection
box_coder
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base box coder. Box coders convert between coordinate frames, namely image-centric (with (0,0) on the top left of image) and anchor-centric (with (0,0) being defined by a specific anchor). Users of a BoxCoder can call two methods: encode: which encodes a box with respect to a given anchor (or rather, a tensor of boxes wrt a corresponding tensor of anchors) and decode: which inverts this encoding with a decode operation. In both cases, the arguments are assumed to be in 1-1 correspondence already; it is not the job of a BoxCoder to perform matching. """ from abc import ABCMeta from abc import abstractmethod from abc import abstractproperty import tensorflow.compat.v1 as tf # Box coder types. FASTER_RCNN = 'faster_rcnn' KEYPOINT = 'keypoint' MEAN_STDDEV = 'mean_stddev' SQUARE = 'square' class BoxCoder(object): """Abstract base class for box coder.""" __metaclass__ = ABCMeta @abstractproperty def code_size(self): """Return the size of each code. This number is a constant and should agree with the output of the `encode` op (e.g. if rel_codes is the output of self.encode(...), then it should have shape [N, code_size()]). This abstractproperty should be overridden by implementations. Returns: an integer constant """ pass def encode(self, boxes, anchors): """Encode a box list relative to an anchor collection. Args: boxes: BoxList holding N boxes to be encoded anchors: BoxList of N anchors Returns: a tensor representing N relative-encoded boxes """ with tf.name_scope('Encode'): return self._encode(boxes, anchors) def decode(self, rel_codes, anchors): """Decode boxes that are encoded relative to an anchor collection. Args: rel_codes: a tensor representing N relative-encoded boxes anchors: BoxList of anchors Returns: boxlist: BoxList holding N boxes encoded in the ordinary way (i.e., with corners y_min, x_min, y_max, x_max) """ with tf.name_scope('Decode'): return self._decode(rel_codes, anchors) @abstractmethod def _encode(self, boxes, anchors): """Method to be overridden by implementations. Args: boxes: BoxList holding N boxes to be encoded anchors: BoxList of N anchors Returns: a tensor representing N relative-encoded boxes """ pass @abstractmethod def _decode(self, rel_codes, anchors): """Method to be overridden by implementations. Args: rel_codes: a tensor representing N relative-encoded boxes anchors: BoxList of anchors Returns: boxlist: BoxList holding N boxes encoded in the ordinary way (i.e., with corners y_min, x_min, y_max, x_max) """ pass def batch_decode(encoded_boxes, box_coder, anchors): """Decode a batch of encoded boxes. This op takes a batch of encoded bounding boxes and transforms them to a batch of bounding boxes specified by their corners in the order of [y_min, x_min, y_max, x_max]. Args: encoded_boxes: a float32 tensor of shape [batch_size, num_anchors, code_size] representing the location of the objects. 
box_coder: a BoxCoder object. anchors: a BoxList of anchors used to encode `encoded_boxes`. Returns: decoded_boxes: a float32 tensor of shape [batch_size, num_anchors, coder_size] representing the corners of the objects in the order of [y_min, x_min, y_max, x_max]. Raises: ValueError: if batch sizes of the inputs are inconsistent, or if the number of anchors inferred from encoded_boxes and anchors are inconsistent. """ encoded_boxes.get_shape().assert_has_rank(3) if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static(): raise ValueError('The number of anchors inferred from encoded_boxes' ' and anchors are inconsistent: shape[1] of encoded_boxes' ' %s should be equal to the number of anchors: %s.' % (encoded_boxes.get_shape()[1].value, anchors.num_boxes_static())) decoded_boxes = tf.stack([ box_coder.decode(boxes, anchors).get() for boxes in tf.unstack(encoded_boxes) ]) return decoded_boxes
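To make the abstract contract concrete, here is a minimal illustrative subclass. It is not one of the coders referenced by the type constants above; it only shows how code_size, _encode and _decode are overridden. boxes and anchors are assumed to be BoxList objects whose get() returns [N, 4] corner tensors, and the box_list import path may need adjusting to this package's layout.

import tensorflow.compat.v1 as tf

from object_detection import box_list  # adjust to the local package layout if needed


class OffsetBoxCoder(BoxCoder):  # BoxCoder as defined in the module above
  """Toy coder: encodes a box as raw corner offsets from its anchor."""

  @property
  def code_size(self):
    return 4

  def _encode(self, boxes, anchors):
    # [N, 4] corner offsets (y_min, x_min, y_max, x_max).
    return boxes.get() - anchors.get()

  def _decode(self, rel_codes, anchors):
    return box_list.BoxList(rel_codes + anchors.get())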
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util
util
taco2Utils
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "taco2Utils.h" #include "cuda_fp16.h" #include <cassert> #include <sstream> using namespace nvinfer1; namespace taco2 { /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ __global__ void floatsToHalvesKernel( const float2* const floats, __half2* const halves, const int num) { const int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < num) { halves[idx] = __float22half2_rn(floats[idx]); } } /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ int Taco2Utils::roundUpBlocks(const int num, const int blockSize) { if (num < 0) { throw std::runtime_error( "Taco2Utils::roundUpBlocks(): Number of items must be non-negative: " + std::to_string(num)); } else if (blockSize <= 0) { throw std::runtime_error( "Taco2Utils::roundUpBlocks(): Invalid block size: " + std::to_string(blockSize)); } else if (num == 0) { // avoid underflow return 0; } else { return ((num - 1) / blockSize) + 1; } } std::vector<float> Taco2Utils::toFloatVector(const Weights& weights) { if (weights.type != DataType::kFLOAT) { throw std::runtime_error( "Invalid data type for LSTMCell weights: " + std::to_string(static_cast<int>(weights.type))); } const float* const valuesBegin = static_cast<const float*>(weights.values); const float* const valuesEnd = valuesBegin + weights.count; return std::vector<float>(valuesBegin, valuesEnd); } std::string Taco2Utils::dimsToString(const Dims& dim) { std::ostringstream oss; oss << "{"; for (int i = 0; i < dim.nbDims; ++i) { oss << dim.d[i] << " "; } oss << "}"; return oss.str(); } size_t Taco2Utils::getDimensionsSize(const Dims& dims) { size_t i = 1; for (int d = 0; d < dims.nbDims; ++d) { if (dims.d[d] == -1) { if (d == 0) 
{ // ignore batch dimension } else { throw std::runtime_error("Cannot get size of tensor with dynamic " "dimension."); } } else { assert(dims.d[d] > 0); i *= dims.d[d]; } } return i; } Dims Taco2Utils::getCompactedDims(const Dims& dims, const int minLength) { Dims cDims{0, {}, {}}; if (dims.nbDims) { for (int d = 0; d < dims.nbDims; ++d) { if (dims.d[d] > 1) { cDims.d[cDims.nbDims++] = dims.d[d]; } } if (cDims.nbDims == 0) { cDims.nbDims = 1; cDims.d[0] = 1; } } if (cDims.nbDims < minLength) { const int offset = minLength - cDims.nbDims; for (int i = cDims.nbDims; i > 0;) { --i; cDims.d[i + offset] = cDims.d[i]; } for (int i = 0; i < offset; ++i) { cDims.d[i] = 1; } cDims.nbDims = minLength; } return cDims; } void Taco2Utils::floatsToHalves( const float* floats, float* halves, const size_t num) { if (num % 2 != 0) { throw std::runtime_error("Cannot convert odd number of floats to havles."); } const size_t halfNum = num / 2; const dim3 block(1024); const dim3 grid(roundUpBlocks(halfNum, block.x)); floatsToHalvesKernel<<<grid, block>>>( reinterpret_cast<const float2*>(floats), reinterpret_cast<__half2*>(halves), halfNum); } } // namespace taco2
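A small worked example (in Python, for brevity) of the launch-size arithmetic above: roundUpBlocks(num, blockSize) is a ceiling division, and floatsToHalves() processes the input as float2/__half2 pairs, which is why the element count must be even.

def round_up_blocks(num, block_size):
    # Mirrors Taco2Utils::roundUpBlocks: ceil(num / block_size), 0 for num == 0.
    return 0 if num == 0 else (num - 1) // block_size + 1

num_floats = 80 * 256                 # e.g. an 80x256 weight matrix (even count)
half2_elements = num_floats // 2      # each thread converts one float2 -> __half2
print(round_up_blocks(half2_elements, 1024))  # -> 10 blocks of 1024 threads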
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2/unidecoder
unidecoder
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import io
import warnings

from .homoglyphs import homoglyphs
from .replacements import replacements


_replacements = {uni: asc for uni, asc in replacements}
_homoglyphs = {g: asc for asc, glyphs in homoglyphs.items() for g in glyphs}


def unidecoder(s, homoglyphs=False):
    """Transliterate unicode

    Args:
        s (str): unicode string
        homoglyphs (bool): prioritize translating to homoglyphs
    """
    warned = False  # Once per utterance
    ret = ''
    for u in s:
        if ord(u) < 127:
            a = u
        elif homoglyphs:
            a = _homoglyphs.get(u, _replacements.get(u, None))
        else:
            a = _replacements.get(u, _homoglyphs.get(u, None))

        if a is None:
            if not warned:
                warnings.warn(f'Unexpected character {u}: '
                              'please revise your text cleaning rules.',
                              stacklevel=10**6)
                warned = True
        else:
            ret += a

    return ret
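Illustrative use of unidecoder(); the exact ASCII output depends on the generated replacements and homoglyphs tables, so the results indicated in the comments are expected behaviour rather than guaranteed strings, and the import path depends on where the package sits.

from unidecoder import unidecoder  # adjust to the local package path

text = 'Münchner Straße'
print(unidecoder(text))                   # plain-ASCII transliteration, e.g. 'Munchner Strasse'
print(unidecoder(text, homoglyphs=True))  # prefers visually identical glyphs where available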
PyTorch/SpeechRecognition/QuartzNet/platform
platform
DGX2_QuartzNet_FP32_16GPU
#!/bin/bash

set -a

: ${NUM_GPUS:=16}
: ${GPU_BATCH_SIZE:=36}
: ${GRAD_ACCUMULATION:=2}
: ${AMP:=false}

bash scripts/train.sh "$@"
TensorFlow/Classification/ConvNets/utils
utils
learning_rate
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# ==============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import tensorflow as tf

__all__ = ['learning_rate_scheduler']


def learning_rate_scheduler(lr_init, lr_warmup_epochs, global_step, batch_size,
                            num_batches_per_epoch, num_decay_steps, num_gpus,
                            use_cosine_lr):

    def get_scaled_base_learning_rate():
        """Calculates base learning rate for creating lr schedule.

        In replicated mode, gradients are summed rather than averaged which, with
        the sgd and momentum optimizers, increases the effective learning rate by
        lr * num_gpus. Dividing the base lr by num_gpus negates the increase.

        Args:
            batch_size: Total batch-size.

        Returns:
            Base learning rate to use to create lr schedule.
        """
        base_lr = lr_init * num_gpus  # Starting LR = 0.1 with BS = 256, else linearly scale
        return base_lr * (batch_size / 256.0)

    rescaled_lr = get_scaled_base_learning_rate()

    if use_cosine_lr:
        print("Using cosine learning rate schedule")
        lr = tf.train.cosine_decay(rescaled_lr, global_step, num_decay_steps)
    else:
        print("Using step learning rate schedule")
        boundaries = [int(num_batches_per_epoch * x) for x in [30, 60, 80, 90]]
        values = [1e0, 1e-1, 1e-2, 1e-3, 1e-4]
        values = [rescaled_lr * v for v in values]
        lr = tf.train.piecewise_constant(global_step, boundaries, values)

    warmup_steps = int(num_batches_per_epoch * lr_warmup_epochs)
    warmup_lr = (rescaled_lr * tf.cast(global_step, tf.float32) /
                 tf.cast(warmup_steps, tf.float32))

    return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
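A worked numeric example of the schedule above, using illustrative values (lr_init=0.1, 8 GPUs, global batch size 2048, 625 batches per epoch, 5 warm-up epochs):

# rescaled_lr = lr_init * num_gpus * (batch_size / 256)
lr_init, num_gpus, batch_size = 0.1, 8, 2048
rescaled_lr = lr_init * num_gpus * (batch_size / 256.0)     # 6.4

# During warm-up the LR ramps linearly from 0 to rescaled_lr:
num_batches_per_epoch, lr_warmup_epochs = 625, 5
warmup_steps = num_batches_per_epoch * lr_warmup_epochs      # 3125
global_step = 1000
warmup_lr = rescaled_lr * global_step / warmup_steps         # 2.048
print(rescaled_lr, warmup_lr)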
TensorFlow/Segmentation/VNet/utils
utils
data_loader
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import math import multiprocessing import os import SimpleITK as sitk import horovod.tensorflow as hvd import numpy as np import tensorflow as tf from scipy import stats def parse_nifti(path, dtype, dst_size, interpolator, normalization=None, modality=None): sitk_image = load_image(path) sitk_image = resize_image(sitk_image, dst_size=dst_size, interpolator=interpolator) image = sitk_to_np(sitk_image) if modality and 'CT' not in modality: if normalization: image = stats.zscore(image, axis=None) elif modality: raise NotImplementedError return image def make_ref_image(img_path, dst_size, interpolator): ref_image = load_image(img_path) ref_image = resize_image(ref_image, dst_size=dst_size, interpolator=interpolator) return sitk_to_np(ref_image) / np.max(ref_image) * 255 def make_interpolator(interpolator): if interpolator == 'linear': return sitk.sitkLinear else: raise ValueError("Unknown interpolator type") def load_image(img_path): image = sitk.ReadImage(img_path) if image.GetDimension() == 4: image = sitk.GetImageFromArray(sitk.GetArrayFromImage(image)[-1, :, :, :]) if image.GetPixelID() != sitk.sitkFloat32: return sitk.Cast(image, sitk.sitkFloat32) return image def sitk_to_np(sitk_img): return np.transpose(sitk.GetArrayFromImage(sitk_img), [2, 1, 0]) def resize_image(sitk_img, dst_size=(128, 128, 64), interpolator=sitk.sitkNearestNeighbor): reference_image = sitk.Image(dst_size, sitk_img.GetPixelIDValue()) reference_image.SetOrigin(sitk_img.GetOrigin()) reference_image.SetDirection(sitk_img.GetDirection()) reference_image.SetSpacing( [sz * spc / nsz for nsz, sz, spc in zip(dst_size, sitk_img.GetSize(), sitk_img.GetSpacing())]) return sitk.Resample(sitk_img, reference_image, sitk.Transform(3, sitk.sitkIdentity), interpolator) class MSDJsonParser: def __init__(self, json_path): with open(json_path) as f: data = json.load(f) self._labels = data.get('labels') self._x_train = [os.path.join(os.path.dirname(json_path), p['image']) for p in data.get('training')] self._y_train = [os.path.join(os.path.dirname(json_path), p['label']) for p in data.get('training')] self._x_test = [os.path.join(os.path.dirname(json_path), p) for p in data.get('test')] self._modality = [data.get('modality')[k] for k in data.get('modality').keys()] @property def labels(self): return self._labels @property def x_train(self): return self._x_train @property def y_train(self): return self._y_train @property def x_test(self): return self._x_test @property def modality(self): return self._modality def make_split(json_parser, train_split, split_seed=0): np.random.seed(split_seed) train_size = int(len(json_parser.x_train) * train_split) return np.array(json_parser.x_train)[:train_size], np.array(json_parser.y_train)[:train_size], \ np.array(json_parser.x_train)[train_size:], np.array(json_parser.y_train)[train_size:] class MSDDataset(object): def __init__(self, json_path, dst_size=[128, 128, 64], seed=None, 
interpolator=None, data_normalization=None, batch_size=1, train_split=1.0, split_seed=0): self._json_parser = MSDJsonParser(json_path) self._interpolator = make_interpolator(interpolator) self._ref_image = make_ref_image(img_path=self._json_parser.x_test[0], dst_size=dst_size, interpolator=self._interpolator) np.random.seed(split_seed) self._train_img, self._train_label, \ self._eval_img, self._eval_label = make_split(self._json_parser, train_split) self._test_img = np.array(self._json_parser.x_test) self._dst_size = dst_size self._seed = seed self._batch_size = batch_size self._train_split = train_split self._data_normalization = data_normalization np.random.seed(self._seed) @property def labels(self): return self._json_parser.labels @property def train_steps(self): global_batch_size = hvd.size() * self._batch_size return math.ceil( len(self._train_img) / global_batch_size) @property def eval_steps(self): return math.ceil(len(self._eval_img) / self._batch_size) @property def test_steps(self): return math.ceil(len(self._test_img) / self._batch_size) def _parse_image(self, img): return parse_nifti(path=img, dst_size=self._dst_size, dtype=tf.float32, interpolator=self._interpolator, normalization=self._data_normalization, modality=self._json_parser.modality) def _parse_label(self, label): return parse_nifti(path=label, dst_size=self._dst_size, dtype=tf.int32, interpolator=sitk.sitkNearestNeighbor) def _augment(self, x, y): # Horizontal flip h_flip = tf.random_uniform([]) > 0.5 x = tf.cond(h_flip, lambda: tf.image.flip_left_right(x), lambda: x) y = tf.cond(h_flip, lambda: tf.image.flip_left_right(y), lambda: y) # Vertical flip v_flip = tf.random_uniform([]) > 0.5 x = tf.cond(v_flip, lambda: tf.image.flip_up_down(x), lambda: x) y = tf.cond(v_flip, lambda: tf.image.flip_up_down(y), lambda: y) return x, y def _img_generator(self, collection): for element in collection: yield self._parse_image(element) def _label_generator(self, collection): for element in collection: yield self._parse_label(element) def train_fn(self, augment): images = tf.data.Dataset.from_generator(generator=lambda: self._img_generator(self._train_img), output_types=tf.float32, output_shapes=(32, 32, 32)) labels = tf.data.Dataset.from_generator(generator=lambda: self._label_generator(self._train_label), output_types=tf.int32, output_shapes=(32, 32, 32)) dataset = tf.data.Dataset.zip((images, labels)) dataset = dataset.cache() dataset = dataset.repeat() dataset = dataset.shuffle(buffer_size=self._batch_size * 2, reshuffle_each_iteration=True, seed=self._seed) dataset = dataset.shard(hvd.size(), hvd.rank()) if augment: dataset = dataset.apply( tf.data.experimental.map_and_batch(map_func=self._augment, batch_size=self._batch_size, drop_remainder=True, num_parallel_calls=multiprocessing.cpu_count())) else: dataset = dataset.batch(self._batch_size) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) return dataset def eval_fn(self): images = tf.data.Dataset.from_generator(generator=lambda: self._img_generator(self._eval_img), output_types=tf.float32, output_shapes=(32, 32, 32)) labels = tf.data.Dataset.from_generator(generator=lambda: self._label_generator(self._eval_label), output_types=tf.int32, output_shapes=(32, 32, 32)) dataset = tf.data.Dataset.zip((images, labels)) dataset = dataset.cache() dataset = dataset.batch(self._batch_size, drop_remainder=True) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) return dataset def test_fn(self, count=1): dataset = 
tf.data.Dataset.from_generator(generator=lambda: self._img_generator(self._test_img), output_types=tf.float32, output_shapes=(32, 32, 32)) dataset = dataset.cache() dataset = dataset.repeat(count=count) dataset = dataset.batch(self._batch_size, drop_remainder=True) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) return dataset
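A minimal usage sketch of the MSDDataset loader above. The import path, dataset path, and parameter values are assumptions for illustration; Horovod must be initialized before the loader is built because sharding and step counts depend on it.
import horovod.tensorflow as hvd
from dataset import MSDDataset  # assumed import path for the class defined above

hvd.init()

dataset = MSDDataset(json_path='/data/Task01_BrainTumour/dataset.json',  # placeholder path
                     dst_size=[128, 128, 64],
                     interpolator='linear',   # the only interpolator accepted by make_interpolator
                     batch_size=2,
                     train_split=0.9,
                     seed=0)

train_ds = dataset.train_fn(augment=True)
print(dataset.train_steps, dataset.eval_steps)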
PyTorch/SpeechSynthesis/HiFiGAN
HiFiGAN
prepare_dataset
# ***************************************************************************** # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # ***************************************************************************** import argparse from pathlib import Path import torch import tqdm import dllogger as DLLogger from dllogger import StdOutBackend, JSONStreamBackend, Verbosity from torch.utils.data import DataLoader from fastpitch.data_function import TTSCollate, TTSDataset def parse_args(parser): """ Parse commandline arguments. 
""" parser.add_argument('-d', '--dataset-path', type=str, default='./', help='Path to dataset') parser.add_argument('--wav-text-filelists', required=True, nargs='+', type=str, help='Files with audio paths and text') parser.add_argument('--extract-mels', action='store_true', help='Calculate spectrograms from .wav files') parser.add_argument('--extract-pitch', action='store_true', help='Extract pitch') parser.add_argument('--log-file', type=str, default='preproc_log.json', help='Filename for logging') parser.add_argument('--n-speakers', type=int, default=1) # Mel extraction parser.add_argument('--max-wav-value', default=32768.0, type=float, help='Maximum audiowave value') parser.add_argument('--sampling-rate', default=22050, type=int, help='Sampling rate') parser.add_argument('--filter-length', default=1024, type=int, help='Filter length') parser.add_argument('--hop-length', default=256, type=int, help='Hop (stride) length') parser.add_argument('--win-length', default=1024, type=int, help='Window length') parser.add_argument('--mel-fmin', default=0.0, type=float, help='Minimum mel frequency') parser.add_argument('--mel-fmax', default=8000.0, type=float, help='Maximum mel frequency') parser.add_argument('--n-mel-channels', type=int, default=80) # Pitch extraction parser.add_argument('--f0-method', default='pyin', type=str, choices=('pyin',), help='F0 estimation method') # Performance parser.add_argument('-b', '--batch-size', default=1, type=int) parser.add_argument('--n-workers', type=int, default=16) return parser def main(): parser = argparse.ArgumentParser(description='TTS Data Pre-processing') parser = parse_args(parser) args, unk_args = parser.parse_known_args() if len(unk_args) > 0: raise ValueError(f'Invalid options {unk_args}') DLLogger.init(backends=[ JSONStreamBackend(Verbosity.DEFAULT, Path(args.dataset_path, args.log_file)), StdOutBackend(Verbosity.VERBOSE)]) for k, v in vars(args).items(): DLLogger.log(step="PARAMETER", data={k: v}) DLLogger.flush() if args.extract_mels: Path(args.dataset_path, 'mels').mkdir(parents=False, exist_ok=True) if args.extract_pitch: Path(args.dataset_path, 'pitch').mkdir(parents=False, exist_ok=True) for filelist in args.wav_text_filelists: print(f'Processing {filelist}...') dataset = TTSDataset( args.dataset_path, filelist, text_cleaners=['english_cleaners_v2'], n_mel_channels=args.n_mel_channels, p_arpabet=0.0, n_speakers=args.n_speakers, load_mel_from_disk=False, load_pitch_from_disk=False, pitch_mean=None, pitch_std=None, max_wav_value=args.max_wav_value, sampling_rate=args.sampling_rate, filter_length=args.filter_length, hop_length=args.hop_length, win_length=args.win_length, mel_fmin=args.mel_fmin, mel_fmax=args.mel_fmax, betabinomial_online_dir=None, pitch_online_dir=None, pitch_online_method=args.f0_method if args.extract_pitch else None) data_loader = DataLoader( dataset, batch_size=args.batch_size, shuffle=False, sampler=None, num_workers=args.n_workers, collate_fn=TTSCollate(), pin_memory=False, drop_last=False) all_filenames = set() for i, batch in enumerate(tqdm.tqdm(data_loader)): _, input_lens, mels, mel_lens, _, pitch, _, _, attn_prior, fpaths = batch # Ensure filenames are unique for p in fpaths: fname = Path(p).name if fname in all_filenames: raise ValueError(f'Filename is not unique: {fname}') all_filenames.add(fname) if args.extract_mels: for j, mel in enumerate(mels): fname = Path(fpaths[j]).with_suffix('.pt').name fpath = Path(args.dataset_path, 'mels', fname) torch.save(mel[:, :mel_lens[j]], fpath) if args.extract_pitch: for j, p 
in enumerate(pitch): fname = Path(fpaths[j]).with_suffix('.pt').name fpath = Path(args.dataset_path, 'pitch', fname) torch.save(p[:mel_lens[j]], fpath) if __name__ == '__main__': main()
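A minimal sketch of consuming the artifacts this script writes with --extract-mels and --extract-pitch; the dataset directory and utterance name are placeholders. Each tensor is stored with torch.save and can be read back directly.
import torch

mel = torch.load('LJSpeech-1.1/mels/LJ001-0001.pt')      # n_mel_channels x frames
pitch = torch.load('LJSpeech-1.1/pitch/LJ001-0001.pt')   # sliced to the same number of frames
print(mel.shape, pitch.shape)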
Tools/PyTorch/TimeSeriesPredictionPlatform
TimeSeriesPredictionPlatform
criterion
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn import torch.nn.functional as F class TSPP_criterion_wrapper(nn.Module): '''This wrapper unifies definition of forward function across different criterions''' def __init__(self, criterion, cl_start_horizon=None, cl_update=None): super().__init__() self.criterion = criterion self.base_arguments = set(criterion.forward.__code__.co_varnames) self.additional_arguments = {'weights'} self.allowed_arguments = self.base_arguments.union(self.additional_arguments) # Curriciulum learning assert bool(cl_start_horizon) == bool(cl_update), "Both cl_start_horizon and cl_update have to be set or unset at the same time" self.curr_horizon = cl_start_horizon self.horizon_update = cl_update self.cl_counter = 0 def forward(self, preds, labels, weights=None, **kwargs): disallowed_kwargs = set(kwargs.keys()) - self.allowed_arguments if disallowed_kwargs: raise TypeError(f'Invalid keyword arguments {disallowed_kwargs} for {type(self.criterion)}') kwargs = {name:arg for name, arg in kwargs.items() if name in self.base_arguments} if self.training: if self.curr_horizon: preds = preds[:, :self.curr_horizon] labels = labels[:, :self.curr_horizon] weights = weights[:, :self.curr_horizon] if weights is not None else None if (self.cl_counter + 1) % self.horizon_update == 0: self.curr_horizon += 1 self.cl_counter += 1 # We expect preds to be shaped batch_size x time x num_estimators in 3D case # or batch_size x time x num_targets x num_estimators in 4D case if len(preds.shape) == 4 and len(labels.shape) == 3: labels = labels.unsqueeze(-1) if weights is not None: weights = weights.unsqueeze(-1) loss = self.criterion(preds, labels, **kwargs) if weights is not None and weights.numel(): # Presence of weights is detected on config level. Loss is reduced accordingly loss *= weights loss = loss.view(-1, *loss.shape[2:]).mean(0) return loss class QuantileLoss(nn.Module): def __init__(self, quantiles, reduction='mean'): super().__init__() self.quantiles = quantiles self.reduce = reduction == 'mean' def forward(self, predictions, targets,weights=None): if not hasattr(self, 'q'): self.register_buffer('q', predictions.new(self.quantiles)) diff = predictions - targets losses = (1-self.q) * F.relu(diff) + self.q * F.relu(-diff) if self.reduce: losses = losses.view(-1, losses.shape[-1]).mean(0) return losses class GaussianLogLikelihood(nn.Module): def __init__(self, reduction='mean'): super().__init__() self.reduce = reduction == 'mean' def forward(self, predictions, targets): # Inputs with shape [BS, window, 2] (mean + std) # Targets with shape [BS, window, 1] mu = predictions[..., 0:1] sigma = predictions[..., 1:2] distribution = torch.distributions.normal.Normal(mu, sigma) likelihood = distribution.log_prob(targets) likelihood = -likelihood.view(targets.shape[0], targets.shape[1]) loss = torch.unsqueeze(likelihood,-1) if self.reduce: loss = loss.mean(0) return loss
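A minimal sketch of the quantile loss above on dummy data; the import path is an assumption. With reduction='mean' the loss is averaged over batch and time, leaving one value per quantile.
import torch
from criterion import QuantileLoss  # assumed import path

crit = QuantileLoss(quantiles=[0.1, 0.5, 0.9])
preds = torch.randn(8, 24, 3)    # batch x horizon x one estimator per quantile
targets = torch.randn(8, 24, 1)  # broadcast against the estimator dimension
print(crit(preds, targets))      # tensor with one loss value per quantile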
TensorFlow/Detection/SSD/models/research/object_detection/box_coders
box_coders
faster_rcnn_box_coder
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Faster RCNN box coder. Faster RCNN box coder follows the coding schema described below: ty = (y - ya) / ha tx = (x - xa) / wa th = log(h / ha) tw = log(w / wa) where x, y, w, h denote the box's center coordinates, width and height respectively. Similarly, xa, ya, wa, ha denote the anchor's center coordinates, width and height. tx, ty, tw and th denote the anchor-encoded center, width and height respectively. See http://arxiv.org/abs/1506.01497 for details. """ import tensorflow as tf from object_detection.core import box_coder from object_detection.core import box_list EPSILON = 1e-8 class FasterRcnnBoxCoder(box_coder.BoxCoder): """Faster RCNN box coder.""" def __init__(self, scale_factors=None): """Constructor for FasterRcnnBoxCoder. Args: scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. If set to None, does not perform scaling. For Faster RCNN, the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0]. """ if scale_factors: assert len(scale_factors) == 4 for scalar in scale_factors: assert scalar > 0 self._scale_factors = scale_factors @property def code_size(self): return 4 def _encode(self, boxes, anchors): """Encode a box collection with respect to anchor collection. Args: boxes: BoxList holding N boxes to be encoded. anchors: BoxList of anchors. Returns: a tensor representing N anchor-encoded boxes of the format [ty, tx, th, tw]. """ # Convert anchors to the center coordinate representation. ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() # Avoid NaN in division and log below. ha += EPSILON wa += EPSILON h += EPSILON w += EPSILON tx = (xcenter - xcenter_a) / wa ty = (ycenter - ycenter_a) / ha tw = tf.log(w / wa) th = tf.log(h / ha) # Scales location targets as used in paper for joint training. if self._scale_factors: ty *= self._scale_factors[0] tx *= self._scale_factors[1] th *= self._scale_factors[2] tw *= self._scale_factors[3] return tf.transpose(tf.stack([ty, tx, th, tw])) def _decode(self, rel_codes, anchors): """Decode relative codes to boxes. Args: rel_codes: a tensor representing N anchor-encoded boxes. anchors: BoxList of anchors. Returns: boxes: BoxList holding N bounding boxes. """ ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes)) if self._scale_factors: ty /= self._scale_factors[0] tx /= self._scale_factors[1] th /= self._scale_factors[2] tw /= self._scale_factors[3] w = tf.exp(tw) * wa h = tf.exp(th) * ha ycenter = ty * ha + ycenter_a xcenter = tx * wa + xcenter_a ymin = ycenter - h / 2. xmin = xcenter - w / 2. ymax = ycenter + h / 2. xmax = xcenter + w / 2. return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
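A NumPy sketch of the encode/decode equations in the docstring, worked for a single box/anchor pair and independent of the TensorFlow BoxList machinery; the numbers are made up.
import numpy as np

ya, xa, ha, wa = 0.5, 0.5, 0.4, 0.2   # anchor center and size
y, x, h, w = 0.55, 0.48, 0.5, 0.25    # ground-truth center and size

ty, tx = (y - ya) / ha, (x - xa) / wa
th, tw = np.log(h / ha), np.log(w / wa)

# Decoding inverts the transform exactly (up to the EPSILON used for numerical safety).
y_dec, x_dec = ty * ha + ya, tx * wa + xa
h_dec, w_dec = np.exp(th) * ha, np.exp(tw) * wa
assert np.allclose([y_dec, x_dec, h_dec, w_dec], [y, x, h, w])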
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
rfcn_resnet101_coco
# R-FCN with Resnet-101 (v1), configuration for MSCOCO Dataset. # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. model { faster_rcnn { num_classes: 90 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_resnet101' first_stage_features_stride: 16 } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { op: CONV regularizer { l2_regularizer { weight: 0.0 } } initializer { truncated_normal_initializer { stddev: 0.01 } } } first_stage_nms_score_threshold: 0.0 first_stage_nms_iou_threshold: 0.7 first_stage_max_proposals: 300 first_stage_localization_loss_weight: 2.0 first_stage_objectness_loss_weight: 1.0 second_stage_box_predictor { rfcn_box_predictor { conv_hyperparams { op: CONV regularizer { l2_regularizer { weight: 0.0 } } initializer { truncated_normal_initializer { stddev: 0.01 } } } crop_height: 18 crop_width: 18 num_spatial_bins_height: 3 num_spatial_bins_width: 3 } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.0 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } second_stage_localization_loss_weight: 2.0 second_stage_classification_loss_weight: 1.0 } } train_config: { batch_size: 1 optimizer { momentum_optimizer: { learning_rate: { manual_step_learning_rate { initial_learning_rate: 0.0003 schedule { step: 900000 learning_rate: .00003 } schedule { step: 1200000 learning_rate: .000003 } } } momentum_optimizer_value: 0.9 } use_moving_average: false } gradient_clipping_by_norm: 10.0 fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" from_detection_checkpoint: true # Note: The below line limits the training process to 200K steps, which we # empirically found to be sufficient enough to train the pets dataset. This # effectively bypasses the learning rate schedule (the learning rate will # never decay). Remove the below line to train indefinitely. num_steps: 200000 data_augmentation_options { random_horizontal_flip { } } } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" } eval_config: { num_examples: 8000 # Note: The below line limits the evaluation process to 10 evaluations. # Remove the below line to evaluate indefinitely. max_evals: 10 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" shuffle: false num_readers: 1 }
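One possible way to fill in the PATH_TO_BE_CONFIGURED placeholders before launching training; this helper is not part of the repo, and the file names and data root below are placeholders.
from pathlib import Path

config_path = Path('rfcn_resnet101_coco.config')
text = config_path.read_text().replace('PATH_TO_BE_CONFIGURED', '/data/coco')
Path('rfcn_resnet101_coco.local.config').write_text(text)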
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/tf
tf
constants
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # author: Tomasz Grel (tgrel@nvidia.com) emb_output_name = "OUTPUT0" ens_lookup_tensors_name = "LOOKUP_VECTORS" dense_input1_name = "args_1" ens_numerical_features_name = "numerical_features" dense_numerical_features_name = "args_0" dense_output_name = "output_1" ens_output_name = "DENSE_OUTPUT"
PaddlePaddle/LanguageModeling/BERT
BERT
run_pretraining
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time import logging import paddle import paddle.distributed.fleet as fleet from utils.config import parse_args, print_args from utils.save_load import init_program from utils.logger import setup_loggers from utils.affinity import set_cpu_affinity from utils.utility import set_seed, get_trainer_id, get_num_trainers import program import dllogger from lddl.paddle import get_bert_pretrain_data_loader def main(): """ An enterpoint to train a BERT model, which contains five steps. 1. Parse arguments from command line. 2. Initialize distributed training related setting, including CPU affinity. 3. Create training Paddle.static.Program. 4. Load checkpoint or pretrained model if given. 5. Run program (train with datasets and save model if necessary). """ now = time.time() args = parse_args() setup_loggers(args.report_file) if args.show_config: print_args(args) device = paddle.set_device('gpu') fleet.init(is_collective=True) if args.enable_cpu_affinity: set_cpu_affinity() # Create the random seed for the worker set_seed(args.seed + get_trainer_id()) dllogger.log(step="PARAMETER", data={"SEED": args.seed}) dllogger.log(step="PARAMETER", data={"train_start": True}) dllogger.log(step="PARAMETER", data={"batch_size_per_gpu": args.batch_size}) dllogger.log(step="PARAMETER", data={"learning_rate": args.learning_rate}) main_program = paddle.static.default_main_program() startup_program = paddle.static.default_startup_program() model, lr_scheduler, optimizer, loss, feeds = program.build( args, main_program, startup_program) exe = paddle.static.Executor(device) exe.run(startup_program) progress = init_program(args, program=main_program, exe=exe, model=model) train_dataloader = get_bert_pretrain_data_loader( args.input_dir, vocab_file=args.vocab_file, data_loader_kwargs={ 'batch_size': args.batch_size, 'num_workers': args.num_workers, 'persistent_workers': True, 'feed_list': feeds }, base_seed=args.seed, log_dir=None if args.output_dir is None else os.path.join(args.output_dir, 'lddl_log'), log_level=logging.WARNING, start_epoch=0 if progress is None else progress.get("epoch", 0), sequence_length_alignment=64) if args.amp: optimizer.amp_init(device) global_step, actual_steps_this_run, final_loss, train_time_raw = program.run( exe, main_program, args, lr_scheduler, loss, train_dataloader, progress) if get_trainer_id() == 0: e2e_time = time.time() - now if args.benchmark: training_perf = args.batch_size * args.gradient_merge_steps * ( actual_steps_this_run - args.benchmark_warmup_steps ) * get_num_trainers() / train_time_raw else: training_perf = args.batch_size * args.gradient_merge_steps * actual_steps_this_run * get_num_trainers( ) / train_time_raw dllogger.log(step=tuple(), data={ "e2e_train_time": e2e_time, "training_sequences_per_second": training_perf, "final_loss": final_loss, "raw_train_time": train_time_raw }) if __name__ == "__main__": paddle.enable_static() main()
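The training_sequences_per_second metric logged above reduces to the arithmetic below; the values are made up for illustration and correspond to the non-benchmark branch.
batch_size = 256             # per-GPU micro batch
gradient_merge_steps = 32    # gradient accumulation steps
steps = 1000                 # optimizer steps actually run
num_trainers = 8             # number of GPUs
train_time_raw = 3600.0      # seconds spent in the measured steps

training_perf = batch_size * gradient_merge_steps * steps * num_trainers / train_time_raw
print(f'{training_perf:.1f} sequences/s')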
PyTorch/Detection/SSD/ssd
ssd
model
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152 class ResNet(nn.Module): def __init__(self, backbone='resnet50', backbone_path=None, weights="IMAGENET1K_V1"): super().__init__() if backbone == 'resnet18': backbone = resnet18(weights=None if backbone_path else weights) self.out_channels = [256, 512, 512, 256, 256, 128] elif backbone == 'resnet34': backbone = resnet34(weights=None if backbone_path else weights) self.out_channels = [256, 512, 512, 256, 256, 256] elif backbone == 'resnet50': backbone = resnet50(weights=None if backbone_path else weights) self.out_channels = [1024, 512, 512, 256, 256, 256] elif backbone == 'resnet101': backbone = resnet101(weights=None if backbone_path else weights) self.out_channels = [1024, 512, 512, 256, 256, 256] else: # backbone == 'resnet152': backbone = resnet152(weights=None if backbone_path else weights) self.out_channels = [1024, 512, 512, 256, 256, 256] if backbone_path: backbone.load_state_dict(torch.load(backbone_path)) self.feature_extractor = nn.Sequential(*list(backbone.children())[:7]) conv4_block1 = self.feature_extractor[-1][0] conv4_block1.conv1.stride = (1, 1) conv4_block1.conv2.stride = (1, 1) conv4_block1.downsample[0].stride = (1, 1) def forward(self, x): x = self.feature_extractor(x) return x class SSD300(nn.Module): def __init__(self, backbone=ResNet('resnet50')): super().__init__() self.feature_extractor = backbone self.label_num = 81 # number of COCO classes self._build_additional_features(self.feature_extractor.out_channels) self.num_defaults = [4, 6, 6, 6, 4, 4] self.loc = [] self.conf = [] for nd, oc in zip(self.num_defaults, self.feature_extractor.out_channels): self.loc.append(nn.Conv2d(oc, nd * 4, kernel_size=3, padding=1)) self.conf.append(nn.Conv2d(oc, nd * self.label_num, kernel_size=3, padding=1)) self.loc = nn.ModuleList(self.loc) self.conf = nn.ModuleList(self.conf) self._init_weights() def _build_additional_features(self, input_size): self.additional_blocks = [] for i, (input_size, output_size, channels) in enumerate(zip(input_size[:-1], input_size[1:], [256, 256, 128, 128, 128])): if i < 3: layer = nn.Sequential( nn.Conv2d(input_size, channels, kernel_size=1, bias=False), nn.BatchNorm2d(channels), nn.ReLU(inplace=True), nn.Conv2d(channels, output_size, kernel_size=3, padding=1, stride=2, bias=False), nn.BatchNorm2d(output_size), nn.ReLU(inplace=True), ) else: layer = nn.Sequential( nn.Conv2d(input_size, channels, kernel_size=1, bias=False), nn.BatchNorm2d(channels), nn.ReLU(inplace=True), nn.Conv2d(channels, output_size, kernel_size=3, bias=False), nn.BatchNorm2d(output_size), nn.ReLU(inplace=True), ) self.additional_blocks.append(layer) self.additional_blocks = nn.ModuleList(self.additional_blocks) def _init_weights(self): layers = [*self.additional_blocks, *self.loc, *self.conf] for layer in layers: for param in layer.parameters(): if 
param.dim() > 1: nn.init.xavier_uniform_(param) # Shape the classifier to the view of bboxes def bbox_view(self, src, loc, conf): ret = [] for s, l, c in zip(src, loc, conf): ret.append((l(s).reshape(s.size(0), 4, -1), c(s).reshape(s.size(0), self.label_num, -1))) locs, confs = list(zip(*ret)) locs, confs = torch.cat(locs, 2).contiguous(), torch.cat(confs, 2).contiguous() return locs, confs def forward(self, x): x = self.feature_extractor(x) detection_feed = [x] for l in self.additional_blocks: x = l(x) detection_feed.append(x) # Feature Map 38x38x4, 19x19x6, 10x10x6, 5x5x6, 3x3x4, 1x1x4 locs, confs = self.bbox_view(detection_feed, self.loc, self.conf) # For SSD 300, shall return nbatch x 8732 x {nlabels, nlocs} results return locs, confs class Loss(nn.Module): """ Implements the loss as the sum of the followings: 1. Confidence Loss: All labels, with hard negative mining 2. Localization Loss: Only on positive labels Suppose input dboxes has the shape 8732x4 """ def __init__(self, dboxes): super(Loss, self).__init__() self.scale_xy = 1.0/dboxes.scale_xy self.scale_wh = 1.0/dboxes.scale_wh self.sl1_loss = nn.SmoothL1Loss(reduction='none') self.dboxes = nn.Parameter(dboxes(order="xywh").transpose(0, 1).unsqueeze(dim = 0), requires_grad=False) # Two factor are from following links # http://jany.st/post/2017-11-05-single-shot-detector-ssd-from-scratch-in-tensorflow.html self.con_loss = nn.CrossEntropyLoss(reduction='none') def _loc_vec(self, loc): """ Generate Location Vectors """ gxy = self.scale_xy*(loc[:, :2, :] - self.dboxes[:, :2, :])/self.dboxes[:, 2:, ] gwh = self.scale_wh*(loc[:, 2:, :]/self.dboxes[:, 2:, :]).log() return torch.cat((gxy, gwh), dim=1).contiguous() def forward(self, ploc, plabel, gloc, glabel): """ ploc, plabel: Nx4x8732, Nxlabel_numx8732 predicted location and labels gloc, glabel: Nx4x8732, Nx8732 ground truth location and labels """ mask = glabel > 0 pos_num = mask.sum(dim=1) vec_gd = self._loc_vec(gloc) # sum on four coordinates, and mask sl1 = self.sl1_loss(ploc, vec_gd).sum(dim=1) sl1 = (mask.float()*sl1).sum(dim=1) # hard negative mining con = self.con_loss(plabel, glabel) # postive mask will never selected con_neg = con.clone() con_neg[mask] = 0 _, con_idx = con_neg.sort(dim=1, descending=True) _, con_rank = con_idx.sort(dim=1) # number of negative three times positive neg_num = torch.clamp(3*pos_num, max=mask.size(1)).unsqueeze(-1) neg_mask = con_rank < neg_num #print(con.shape, mask.shape, neg_mask.shape) closs = (con*((mask + neg_mask).float())).sum(dim=1) # avoid no object detected total_loss = sl1 + closs num_mask = (pos_num > 0).float() pos_num = pos_num.float().clamp(min=1e-6) ret = (total_loss*num_mask/pos_num).mean(dim=0) return ret
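A quick shape check of the detection head above; the import path is an assumption, and ImageNet weights are downloaded from torchvision unless backbone_path is given.
import torch
from ssd.model import SSD300, ResNet  # assumed import path

model = SSD300(backbone=ResNet('resnet50')).eval()
with torch.no_grad():
    locs, confs = model(torch.randn(2, 3, 300, 300))
print(locs.shape, confs.shape)  # expected: [2, 4, 8732] and [2, 81, 8732]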
PyTorch/Detection/Efficientdet/scripts/D0
D0
train-benchmark_TF32_A100-80G
#!/bin/bash

function get_dataloader_workers {
    gpus=$(nvidia-smi -i 0 --query-gpu=count --format=csv,noheader)
    core=$(nproc --all)
    workers=$((core/gpus-2))
    workers=$((workers>16?16:workers))
    echo ${workers}
}

WORKERS=$(get_dataloader_workers)

./distributed_train.sh ${NUM_PROC:-8} /workspace/object_detection/datasets/coco --model efficientdet_d0 -b 80 --lr 0.9 --opt fusedmomentum --warmup-epochs 50 --lr-noise 0.4 0.9 --output /model --worker ${WORKERS} --fill-color mean --model-ema --model-ema-decay 0.999 --eval-after 200 --epochs 5 --resume --smoothing 0.0 --pretrained-backbone-path /backbone_checkpoints/jocbackbone_statedict_B0.pth --memory-format nchw --sync-bn --fused-focal-loss --seed 12711 --benchmark-steps 500 --benchmark
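The worker heuristic above, restated in plain Python: two CPU cores per GPU are reserved for other work and the result is capped at 16. The core and GPU counts are illustrative.
cores, gpus = 128, 8
workers = min(cores // gpus - 2, 16)
print(workers)  # 14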
Tools/PyTorch/TimeSeriesPredictionPlatform/data
data
data_utils
# Copyright 2021-2022 NVIDIA Corporation # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import enum import os import pickle import hydra import numpy as np import pandas as pd from omegaconf.listconfig import ListConfig from sklearn.impute import SimpleImputer from sklearn.preprocessing import FunctionTransformer from typing import Union class DataTypes(enum.IntEnum): """Defines numerical types of each column.""" CONTINUOUS = 0 CATEGORICAL = 1 DATE = 2 STR = 3 DTYPE_MAP = { DataTypes.CONTINUOUS: np.float32, DataTypes.CATEGORICAL: np.int64, DataTypes.DATE: np.datetime64, DataTypes.STR: str, } class InputTypes(enum.IntEnum): """Defines input types of each column.""" TARGET = 0 OBSERVED = 1 KNOWN = 2 STATIC = 3 ID = 4 # Single column used as an entity identifier TIME = 5 # Single column exclusively used as a time index WEIGHT = 6 SAMPLE_WEIGHT = 7 class FeatureSpec: enabled_attributes = ["name", "feature_type", "feature_embed_type", "cardinality", "scaler"] def __init__(self, input_dict): for key in input_dict: if key in self.enabled_attributes: setattr(self, key, input_dict[key]) else: raise ValueError("Attribute not enabled: {attr}".format(attr=key)) self.name = input_dict["name"] self.feature_type = InputTypes[input_dict["feature_type"]] self.feature_embed_type = DataTypes[input_dict["feature_embed_type"]] def get(self, key, value=None): if hasattr(self, key): return getattr(self, key) else: return value def __str__(self): return str((self.name, self.feature_type, self.feature_embed_type)) def __repr__(self): return str(self) FEAT_ORDER = [ (InputTypes.STATIC, DataTypes.CATEGORICAL), (InputTypes.STATIC, DataTypes.CONTINUOUS), (InputTypes.KNOWN, DataTypes.CATEGORICAL), (InputTypes.KNOWN, DataTypes.CONTINUOUS), (InputTypes.OBSERVED, DataTypes.CATEGORICAL), (InputTypes.OBSERVED, DataTypes.CONTINUOUS), (InputTypes.TARGET, DataTypes.CONTINUOUS), (InputTypes.WEIGHT, DataTypes.CONTINUOUS), (InputTypes.SAMPLE_WEIGHT, DataTypes.CONTINUOUS), (InputTypes.ID, DataTypes.CATEGORICAL), ] FEAT_NAMES = ["s_cat", "s_cont", "k_cat", "k_cont", "o_cat", "o_cont", "target", "weight", "sample_weight", "id"] def group_ids(df, features): col_names = ["_id_"] + [ x.name for x in features if x.feature_embed_type != DataTypes.STR and x.feature_type != InputTypes.TIME and x.feature_type != InputTypes.ID ] grouped = [x[1][col_names].values.astype(np.float32).view(dtype=np.int32) for x in df.groupby("_id_")] return grouped def 
translate_features(features, preproc=False): all_features = [FeatureSpec(feature) for feature in features] if preproc: return all_features return [FeatureSpec({"name": "_id_", "feature_type": "ID", "feature_embed_type": "CATEGORICAL"})] + [ feature for feature in all_features if feature.feature_type != InputTypes.ID ] def map_dt(dt): if isinstance(dt, int): dt = dt elif isinstance(dt, ListConfig): dt = datetime.datetime(*dt) elif isinstance(dt, str): dt = datetime.datetime.strptime(dt, "%Y-%m-%d") return dt def impute(df, config): if not (config.get("missing_data_label", False)): return df, None else: imp = SimpleImputer(missing_values=config.missing_data_label, strategy="mean") mask = df.applymap(lambda x: True if x == config.missing_data_label else False) data = df.values col_mask = (data == config.missing_data_label).all(axis=0) data[:, ~col_mask] = imp.fit_transform(data) return data, mask def map_scalers(features): mapping = {} for feature in features: if feature.get("scaler", None): if mapping.get(feature.scaler, None): mapping[feature.scaler].append(feature.name) else: mapping[feature.scaler] = [feature.name] return mapping class Log1pScaler(FunctionTransformer): @staticmethod def _inverse(x): return np.expm1(x) def __init__(self): super().__init__(func=np.log1p, inverse_func=Log1pScaler._inverse, validate=False) class CompositeScaler: def __init__(self, target_features, input_continuous, scale_per_id): self.target_mapping = map_scalers(target_features) self.continuous_mapping = map_scalers(input_continuous) self.target_features = target_features self.input_continuous = input_continuous self.scale_per_id = scale_per_id self.continuous_scalers = {} self.target_scalers = {} def fit(self, df): for k, v in self.continuous_mapping.items(): self.continuous_scalers[k] = {} if self.scale_per_id: for identifier, sliced in df.groupby("_id_"): scaler = hydra.utils.instantiate(k).fit(sliced[v]) self.continuous_scalers[k][identifier] = scaler else: scaler = hydra.utils.instantiate(k).fit(df[v]) self.continuous_scalers[k][""] = scaler for k, v in self.target_mapping.items(): self.target_scalers[k] = {} if self.scale_per_id: for identifier, sliced in df.groupby("_id_"): scaler = hydra.utils.instantiate(k).fit(sliced[v]) self.target_scalers[k][identifier] = scaler else: scaler = hydra.utils.instantiate(k).fit(df[v]) self.target_scalers[k][""] = scaler def apply_scalers(self, df, name=None): if name is None: name = df.name for k, v in self.continuous_mapping.items(): df[v] = self.continuous_scalers[k][name].transform(df[v]) for k, v in self.target_mapping.items(): df[v] = self.target_scalers[k][name].transform(df[v]) return df def transform(self, df): if self.scale_per_id: df = df.groupby("_id_").apply(self.apply_scalers) else: df = self.apply_scalers(df, name="") return df def inverse_transform_targets(self, values, ids=None): # TODO: Assuming single targets for now. 
This has to be adapted to muti-target if len(self.target_scalers) > 0: shape = values.shape scalers = list(self.target_scalers.values())[0] if self.scale_per_id: assert ids is not None flat_values = values.flatten() flat_ids = np.repeat(ids, values.shape[1]) df = pd.DataFrame({"id": flat_ids, "value": flat_values}) df_list = [] for identifier, sliced in df.groupby("id"): df_list.append(np.stack( [scalers[identifier].inverse_transform(sliced["value"].values.reshape(-1, 1)).flatten(), sliced.index.values], axis=-1)) tmp = np.concatenate(df_list) tmp = tmp[tmp[:, -1].argsort()] return tmp[:, 0].reshape(shape) else: flat_values = values.reshape(-1, 1) flat_values = scalers[""].inverse_transform(flat_values) return flat_values.reshape(shape) return values class Preprocessor: def __init__(self, config): self.config = config self.features = translate_features(self.config["features"], preproc=True) self.feat_splits = self._get_feature_splits() self.cont_features_names = [continuous.name for continuous in self.feat_splits["input_continuous"]] self.dest_path = self.config.dest_path self.source_path = self.config.source_path self.preprocessor_state = {} def _get_feature_splits(self): splits = {} splits["dates"] = [feature for feature in self.features if feature.feature_embed_type == DataTypes.DATE] splits["target_features"] = [feature for feature in self.features if feature.feature_type == InputTypes.TARGET] splits["time_feature"] = [feature for feature in self.features if feature.feature_type == InputTypes.TIME][0] splits["id_features"] = [feature for feature in self.features if feature.feature_type == InputTypes.ID] splits["input_categoricals"] = [ feature for feature in self.features if feature.feature_embed_type == DataTypes.CATEGORICAL and feature.feature_type in [InputTypes.STATIC, InputTypes.KNOWN, InputTypes.OBSERVED] ] splits["input_continuous"] = [ feature for feature in self.features if feature.feature_embed_type == DataTypes.CONTINUOUS and feature.feature_type in [InputTypes.STATIC, InputTypes.KNOWN, InputTypes.OBSERVED] ] return splits def _map_ids(self, df): print("Mapping nodes") id_features = [feature.name for feature in self.feat_splits["id_features"]] if "id_mappings" in self.preprocessor_state: id_features_df = self.preprocessor_state["id_mappings"] id_features_dict = id_features_df.set_index(id_features).to_dict()["_id_"] def id_map_funct(x): var = tuple(x[id_features]) if len(var) == 1: var = var[0] return id_features_dict.get(var, np.nan) df["_id_"] = df.apply(lambda x: id_map_funct(x), axis=1) else: id_features = [feature.name for feature in self.feat_splits["id_features"]] current_id = df[id_features[0]].astype("category").cat.codes + 1 for additional_id in id_features[1:]: current_id = df[additional_id].astype("category").cat.codes * (current_id.max() + 1) + current_id + 1 df["_id_"] = current_id.astype("category").cat.codes id_features_df = df[id_features + ["_id_"]] id_features_df = id_features_df.drop_duplicates(subset=None).reset_index(drop=True) self.preprocessor_state["id_mappings"] = id_features_df def _map_categoricals(self, df): print("Mapping categoricals to bounded range") if "categorical_mappings" in self.preprocessor_state: categorical_mappings = self.preprocessor_state["categorical_mappings"] for categorical in self.feat_splits['input_categoricals']: df[categorical.name] = df[categorical.name].map(categorical_mappings[categorical.name]) else: input_categorical_map_dict = {} for categorical in self.feat_splits['input_categoricals']: cat_feature = 
df[categorical.name].astype("category") input_categorical_map_dict[categorical.name] = dict(zip([np.nan] + cat_feature.cat.categories.tolist(), range(0, len(cat_feature.cat.categories)+1))) df[categorical.name] = cat_feature.cat.codes + 1 self.preprocessor_state["categorical_mappings"] = input_categorical_map_dict def _get_dataset_splits(self, df): print("Splitting datasets") if hasattr(self.config, "valid_boundary") and self.config.valid_boundary is not None: forecast_len = self.config.example_length - self.config.encoder_length # The valid split is shifted from the train split by number of the forecast steps to the future. # The test split is shifted by the number of the forecast steps from the valid split valid_boundary = map_dt(self.config.valid_boundary) grouped = df.groupby('_id_') train_mask = grouped[self.config.time_ids].apply(lambda dates: dates < valid_boundary) train = df[train_mask] print('Calculated train.') train_sizes = train.groupby('_id_').size() valid_indexes = grouped[self.config.time_ids].apply( lambda dates: dates.iloc[(train_sizes[dates.name] - self.config.encoder_length): (train_sizes[dates.name] + forecast_len)].index if dates.name in train_sizes else pd.Series() ) valid = df.loc[np.concatenate(valid_indexes)] print('Calculated valid.') test_indexes = grouped[self.config.time_ids].apply( lambda dates: dates.iloc[(train_sizes[dates.name] - self.config.encoder_length + forecast_len): (train_sizes[dates.name] + 2 * forecast_len)].index if dates.name in train_sizes else pd.Series() ) test = df.loc[np.concatenate(test_indexes)] print('Calculated test.') elif df.dtypes[self.config.time_ids] not in [np.float64, np.int]: index = df[self.config.time_ids] train = df.loc[(index >= map_dt(self.config.train_range[0])) & (index < map_dt(self.config.train_range[1]))] valid = df.loc[(index >= map_dt(self.config.valid_range[0])) & (index < map_dt(self.config.valid_range[1]))] test = df.loc[(index >= map_dt(self.config.test_range[0])) & (index < map_dt(self.config.test_range[1]))] else: index = df[self.config.time_ids] train = df.loc[(index >= self.config.train_range[0]) & (index < self.config.train_range[1])] valid = df.loc[(index >= self.config.valid_range[0]) & (index < self.config.valid_range[1])] test = df.loc[(index >= self.config.test_range[0]) & (index < self.config.test_range[1])] train = train[(train.groupby('_id_').size()[train['_id_']] > self.config.encoder_length).values] valid = valid[(valid.groupby('_id_').size()[valid['_id_']] > self.config.encoder_length).values] test = test[(test.groupby('_id_').size()[test['_id_']] > self.config.encoder_length).values] return train, valid, test def _recombine_datasets(self, train, valid, test): if hasattr(self.config, "valid_boundary") and self.config.valid_boundary is not None: forecast_len = self.config.example_length - self.config.encoder_length # The valid split is shifted from the train split by number of the forecast steps to the future. 
# The test split is shifted by the number of the forecast steps from the valid split train_temp = [] valid_temp = [] for g0, g1 in zip(train.groupby("_id_"), valid.groupby("_id_")): _train = g0[1].iloc[: -self.config.encoder_length] _valid = g1[1].iloc[:forecast_len] train_temp.append(_train) valid_temp.append(_valid) train = pd.concat(train_temp, axis=0) valid = pd.concat(valid_temp, axis=0) elif train.dtypes[self.config.time_ids] not in [np.float64, np.int]: train = train[train[self.config.time_ids] < map_dt(self.config.valid_range[0])] valid = valid[valid[self.config.time_ids] < map_dt(self.config.test_range[0])] else: train = train[train[self.config.time_ids] < self.config.valid_range[0]] valid = valid[valid[self.config.time_ids] < self.config.test_range[0]] return pd.concat((train, valid, test)) def _drop_unseen_categoricals(self, train, valid, test, drop_unseen=True): # TODO: Handle this for inference preprocess function if self.config.get("drop_unseen", False): print("Dropping unseen categoricals") if not drop_unseen: print("Warning: Assuming that inference dataset only has the input categoricals from the training set") return train, valid, test if hasattr(self.config, "valid_boundary") and self.config.valid_boundary is not None: arriter = ["_id_"] else: arriter = [cat.name for cat in self.feat_splits["input_categoricals"]] + ["_id_"] if train is not None: for categorical in arriter: seen_values = train[categorical].unique() valid = valid[valid[categorical].isin(seen_values)] test = test[test[categorical].isin(seen_values)] return train, valid, test def fit_scalers(self, df): print("Calculating scalers") self.scaler = CompositeScaler( self.feat_splits["target_features"], self.feat_splits["input_continuous"], scale_per_id=self.config.get('scale_per_id', False) ) self.scaler.fit(df) self.preprocessor_state["scalers"] = self.scaler def apply_scalers(self, df): print("Applying scalers") return self.preprocessor_state["scalers"].transform(df) def save_datasets(self, train, valid, test): print(F"Saving processed data at {self.dest_path}") os.makedirs(self.dest_path, exist_ok=True) train.to_csv(os.path.join(self.dest_path, "train.csv")) valid.to_csv(os.path.join(self.dest_path, "valid.csv")) test.to_csv(os.path.join(self.dest_path, "test.csv")) self._recombine_datasets(train, valid, test).to_csv(os.path.join(self.dest_path, "full.csv")) # Save relevant columns in binary form for faster dataloading # IMORTANT: We always expect id to be a single column indicating the complete timeseries # We also expect a copy of id in form of static categorical input!!!]] if self.config.get("binarized", False): grouped_train = group_ids(train, self.features) grouped_valid = group_ids(valid, self.features) grouped_test = group_ids(test, self.features) pickle.dump(grouped_train, open(os.path.join(self.dest_path, "train.bin"), "wb")) pickle.dump(grouped_valid, open(os.path.join(self.dest_path, "valid.bin"), "wb")) pickle.dump(grouped_test, open(os.path.join(self.dest_path, "test.bin"), "wb")) def save_state(self): filepath = os.path.join(self.dest_path, "tspp_preprocess.bin") print(F"Saving preprocessor state at {filepath}") with open(filepath, "wb") as f: pickle.dump(self.preprocessor_state, f) def load_state(self, preprocessor_state_file): filepath = os.path.join(self.config.dest_path, "tspp_preprocess.bin") if preprocessor_state_file: filepath = preprocessor_state_file if not os.path.exists(filepath): raise ValueError(F"Invalid preprocessor state file: {filepath}") print(F"Reading preprocessor state binary 
file: {filepath}") f = open(filepath, "rb") self.preprocessor_state = pickle.load(f) required_keys = ("id_mappings", "categorical_mappings", "scalers") if not all(k in self.preprocessor_state for k in required_keys): raise ValueError(F"preprocessor state binary file at :{filepath} must have keys={required_keys} but found={self.preprocessor_state.keys()}") def impute(self, df): print("Fixing any nans in continuous features") df[self.cont_features_names] = df[self.cont_features_names].replace(np.NaN, 10 ** 9) return df def _init_setup(self, dataset=None, drop_na=True): if dataset is None: print(F"Reading in data from CSV File: {self.source_path}") df = pd.read_csv(self.source_path, parse_dates=[d.name for d in self.feat_splits["dates"]]) elif isinstance(dataset, str) and dataset.endswith(".csv"): print(F"Reading in data from CSV File: {dataset}") df = pd.read_csv(dataset, parse_dates=[d.name for d in self.feat_splits["dates"]]) elif isinstance(dataset, pd.DataFrame): print("Input DataFrame provided for preprocessing") #TODO: check support for parse dates as done during read csv # Currently date related features are only used for dataset splits during training df = dataset.copy() else: raise ValueError(F"Function either accepts a path to a csv file or a dataframe") print("Sorting on time feature") #TODO: Check if we sort df for inference only case df = df.sort_values([self.feat_splits["time_feature"].name]) f_names = [feature.name for feature in self.features] + [self.config.time_ids] df = df[list(dict.fromkeys(f_names))] if self.config.get("missing_data_label", False): df = df.replace(self.config.get("missing_data_label"), np.NaN) if drop_na: df = df.dropna(subset=[t.name for t in self.feat_splits["target_features"]]) return df def preprocess(self): df = self._init_setup() self._map_ids(df) self._map_categoricals(df) train, valid, test = self._get_dataset_splits(df) train, valid, test = self._drop_unseen_categoricals(train, valid, test) return train, valid, test def preprocess_test(self, dataset: Union[str, pd.DataFrame]) -> pd.DataFrame: df = self._init_setup(dataset=dataset, drop_na=False) self._map_ids(df) self._map_categoricals(df) #TODO: this is a workaround and maybe needs to be handled properly in the future _, _, df = self._drop_unseen_categoricals(None, None, df, drop_unseen=False) return df
PyTorch/Detection/SSD/examples
examples
SSD300_FP32_INFERENCE_BENCHMARK
# This script launches SSD300 inference benchmark in FP32 on 1 GPU with 32 batch size
# Usage: bash SSD300_FP32_INFERENCE_BENCHMARK.sh <path to this repository> <path to dataset> <additional flags>

python $1/main.py --backbone resnet50 --warmup 300 --mode benchmark-inference --bs 32 --no-amp --data-layout channels_first --data $2 ${@:3}
TensorFlow2/Recommendation/DLRM_and_DCNv2/nn
nn
dense_model
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # author: Tomasz Grel (tgrel@nvidia.com) import json import tensorflow.keras.initializers as initializers import math from tensorflow.python.keras.saving.saving_utils import model_input_signature from .dcn import CrossNetwork from . import interaction import tensorflow as tf import horovod.tensorflow as hvd try: from tensorflow_dot_based_interact.python.ops import dot_based_interact_ops except ImportError: print('WARNING: Could not import the custom dot-interaction kernels') dense_model_parameters = ['embedding_dim', 'interaction', 'bottom_mlp_dims', 'top_mlp_dims', 'num_numerical_features', 'categorical_cardinalities', 'transpose', 'num_cross_layers', 'cross_layer_projection_dim', 'batch_size'] class DenseModel(tf.keras.Model): def __init__(self, **kwargs): super(DenseModel, self).__init__() for field in dense_model_parameters: self.__dict__[field] = kwargs[field] self.num_all_categorical_features = len(self.categorical_cardinalities) self.bottom_mlp_dims = [int(d) for d in self.bottom_mlp_dims] self.top_mlp_dims = [int(d) for d in self.top_mlp_dims] if self.interaction != 'cross' and any(dim != self.embedding_dim[0] for dim in self.embedding_dim): raise ValueError(f'For DLRM all embedding dimensions should be equal, ' f'got interaction={interaction}, embedding_dim={self.embedding_dim}') if self.interaction != 'cross' and self.bottom_mlp_dims[-1] != self.embedding_dim[0]: raise ValueError(f'Final dimension of the Bottom MLP should match embedding dimension. 
' f'Got: {self.bottom_mlp_dims[-1]} and {self.embedding_dim} respectively.') self._create_interaction_op() self._create_bottom_mlp() self._create_top_mlp() self.bottom_mlp_padding = self._compute_padding(num_features=self.num_numerical_features) self.top_mlp_padding = self._compute_padding(num_features=self._get_top_mlp_input_features()) def _create_interaction_op(self): if self.interaction == 'dot_custom_cuda': self.interact_op = dot_based_interact_ops.dot_based_interact elif self.interaction == 'dot_tensorflow': # TODO: add support for datasets with no dense features self.interact_op = interaction.DotInteractionGather(num_features=self.num_all_categorical_features + 1) elif self.interaction == 'cross': self.interact_op = CrossNetwork(num_layers=self.num_cross_layers, projection_dim=self.cross_layer_projection_dim) else: raise ValueError(f'Unknown interaction {self.interaction}') @staticmethod def _compute_padding(num_features, multiple=8): pad_to = math.ceil(num_features / multiple) * multiple return pad_to - num_features def _get_top_mlp_input_features(self): if self.interaction == 'cross': num_features = sum(self.embedding_dim) if self.num_numerical_features != 0: num_features += self.bottom_mlp_dims[-1] return num_features else: num_features = self.num_all_categorical_features if self.num_numerical_features != 0: num_features += 1 num_features = num_features * (num_features - 1) num_features = num_features // 2 num_features = num_features + self.bottom_mlp_dims[-1] return num_features def _create_bottom_mlp(self): self.bottom_mlp_layers = [] for dim in self.bottom_mlp_dims: kernel_initializer = initializers.GlorotNormal() bias_initializer = initializers.RandomNormal(stddev=math.sqrt(1. / dim)) l = tf.keras.layers.Dense(dim, activation='relu', kernel_initializer=kernel_initializer, bias_initializer=bias_initializer) self.bottom_mlp_layers.append(l) def _create_top_mlp(self): self.top_mlp = [] for i, dim in enumerate(self.top_mlp_dims): if i == len(self.top_mlp_dims) - 1: # final layer activation = 'linear' else: activation = 'relu' kernel_initializer = initializers.GlorotNormal() bias_initializer = initializers.RandomNormal(stddev=math.sqrt(1. / dim)) l = tf.keras.layers.Dense(dim, activation=activation, kernel_initializer=kernel_initializer, bias_initializer=bias_initializer) self.top_mlp.append(l) def transpose_nonequal_embedding_dim(self, embedding_outputs, numerical_features): # We get a table-major format here for inference, # but the sizes of the tables are not the same. # Therefore a simple transposition will not work, # we need to perform multiple splits and concats instead. # TODO: test this. 
embedding_outputs = tf.reshape(embedding_outputs, shape=[-1]) batch_size = numerical_features.shape[0] split_sizes = [batch_size * dim for dim in self.embedding_dim] embedding_outputs = tf.split(embedding_outputs, num_or_size_splits=split_sizes) embedding_outputs = [tf.split(eout, num_or_size_splits=dim) for eout, dim in zip(embedding_outputs, self.emdedding_dim)] transposed_outputs = [] * batch_size for i, o in enumerate(transposed_outputs): ith_sample = [out[i] for out in embedding_outputs] ith_sample = tf.concat(ith_sample, axis=1) transposed_outputs[i] = ith_sample transposed_outputs = tf.concat(transposed_outputs, axis=0) return tf.reshape(transposed_outputs, shape=[batch_size, sum(self.embedding_dim)]) def transpose_input(self, embedding_outputs, numerical_features): if any(dim != self.embedding_dim[0] for dim in self.embedding_dim): return self.transpose_nonequal_embedding_dim(embedding_outputs, numerical_features) else: embedding_outputs = tf.reshape(embedding_outputs, shape=[self.num_all_categorical_features, -1, self.embedding_dim[0]]) return tf.transpose(embedding_outputs, perm=[1, 0, 2]) def reshape_input(self, embedding_outputs): if self.interaction == 'cross': return tf.reshape(embedding_outputs, shape=[-1, sum(self.embedding_dim)]) else: return tf.reshape(embedding_outputs, shape=[-1, self.num_all_categorical_features, self.embedding_dim[0]]) @tf.function def call(self, numerical_features, embedding_outputs, sigmoid=False, training=False): numerical_features = tf.reshape(numerical_features, shape=[-1, self.num_numerical_features]) bottom_mlp_out = self._call_bottom_mlp(numerical_features, training) if self.transpose: embedding_outputs = self.transpose_input(embedding_outputs, numerical_features) embedding_outputs = self.reshape_input(embedding_outputs) x = self._call_interaction(embedding_outputs, bottom_mlp_out) x = self._call_top_mlp(x) if sigmoid: x = tf.math.sigmoid(x) x = tf.cast(x, tf.float32) return x def _pad_bottom_mlp_input(self, numerical_features, training): if training: # When training, padding with a statically fixed batch size so that XLA has better shape information. # This yields a significant (~15%) speedup for singleGPU DLRM. padding = tf.zeros(shape=[self.batch_size // hvd.size(), self.bottom_mlp_padding], dtype=self.compute_dtype) x = tf.concat([numerical_features, padding], axis=1) else: # For inference, use tf.pad. # This way inference can be performed with any batch size on the deployed SavedModel. 
x = tf.pad(numerical_features, [[0, 0], [0, self.bottom_mlp_padding]]) return x def _call_bottom_mlp(self, numerical_features, training): numerical_features = tf.cast(numerical_features, dtype=self.compute_dtype) x = self._pad_bottom_mlp_input(numerical_features, training) with tf.name_scope('bottom_mlp'): for l in self.bottom_mlp_layers: x = l(x) x = tf.expand_dims(x, axis=1) bottom_mlp_out = x return bottom_mlp_out def _call_interaction(self, embedding_outputs, bottom_mlp_out): if self.interaction == 'cross': bottom_mlp_out = tf.reshape(bottom_mlp_out, [-1, self.bottom_mlp_dims[-1]]) x = tf.concat([bottom_mlp_out, embedding_outputs], axis=1) x = self.interact_op(x) else: bottom_part_output = tf.concat([bottom_mlp_out, embedding_outputs], axis=1) x = tf.reshape(bottom_part_output, shape=[-1, self.num_all_categorical_features + 1, self.embedding_dim[0]]) bottom_mlp_out = tf.reshape(bottom_mlp_out, shape=[-1, self.bottom_mlp_dims[-1]]) x = self.interact_op(x, bottom_mlp_out) return x def _call_top_mlp(self, x): if self.interaction != 'dot_custom_cuda': x = tf.reshape(x, [-1, self._get_top_mlp_input_features()]) x = tf.pad(x, [[0, 0], [0, self.top_mlp_padding]]) with tf.name_scope('top_mlp'): for i, l in enumerate(self.top_mlp): x = l(x) return x def save_model(self, path, save_input_signature=False): if save_input_signature: input_sig = model_input_signature(self, keep_original_batch_size=True) call_graph = tf.function(self) signatures = call_graph.get_concrete_function(input_sig[0]) else: signatures = None tf.keras.models.save_model(model=self, filepath=path, overwrite=True, signatures=signatures) def force_initialization(self, batch_size=64, training=False, flattened_input=True): if flattened_input: embeddings_output = tf.zeros([batch_size * sum(self.embedding_dim)]) numerical_input = tf.zeros([batch_size * self.num_numerical_features]) else: embeddings_output = tf.zeros([batch_size, sum(self.embedding_dim)]) numerical_input = tf.zeros([batch_size, self.num_numerical_features]) _ = self(numerical_input, embeddings_output, sigmoid=False, training=training) @staticmethod def load_model(path): print('Loading a saved model from', path) loaded = tf.keras.models.load_model(path) return loaded def save_config(self, path): config = {k : self.__dict__[k] for k in dense_model_parameters} with open(path, 'w') as f: json.dump(obj=config, fp=f, indent=4) @staticmethod def from_config(path): with open(path) as f: config = json.load(fp=f) return DenseModel(**config)
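Working through _get_top_mlp_input_features() and _compute_padding() for a DLRM-style setup with 26 categorical features, one numerical block, and a bottom MLP ending in 128; the numbers are illustrative rather than taken from a shipped config.
import math

num_all_categorical_features = 26
bottom_mlp_out = 128

n = num_all_categorical_features + 1       # +1 for the bottom-MLP vector
pairwise = n * (n - 1) // 2                # dot-interaction outputs: 351
top_mlp_in = pairwise + bottom_mlp_out     # 351 + 128 = 479
padding = math.ceil(top_mlp_in / 8) * 8 - top_mlp_in
print(top_mlp_in, padding)                 # 479, padded by 1 up to a multiple of 8 (480)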
PyTorch/LanguageModeling/BERT/data/squad
squad
squad_download
#!/usr/bin/env bash # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. echo "Downloading dataset for squad..." # Download SQuAD v1="v1.1" mkdir $v1 wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json -O $v1/train-v1.1.json wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json -O $v1/dev-v1.1.json wget https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/ -O $v1/evaluate-v1.1.py EXP_TRAIN_v1='981b29407e0affa3b1b156f72073b945 -' EXP_DEV_v1='3e85deb501d4e538b6bc56f786231552 -' EXP_EVAL_v1='afb04912d18ff20696f7f88eed49bea9 -' CALC_TRAIN_v1=`cat ${v1}/train-v1.1.json |md5sum` CALC_DEV_v1=`cat ${v1}/dev-v1.1.json |md5sum` CALC_EVAL_v1=`cat ${v1}/evaluate-v1.1.py |md5sum` v2="v2.0" mkdir $v2 wget https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json -O $v2/train-v2.0.json wget https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json -O $v2/dev-v2.0.json wget https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/ -O $v2/evaluate-v2.0.py EXP_TRAIN_v2='62108c273c268d70893182d5cf8df740 -' EXP_DEV_v2='246adae8b7002f8679c027697b0b7cf8 -' EXP_EVAL_v2='ff23213bed5516ea4a6d9edb6cd7d627 -' CALC_TRAIN_v2=`cat ${v2}/train-v2.0.json |md5sum` CALC_DEV_v2=`cat ${v2}/dev-v2.0.json |md5sum` CALC_EVAL_v2=`cat ${v2}/evaluate-v2.0.py |md5sum` echo "Squad data download done!" echo "Verifying Dataset...." if [ "$EXP_TRAIN_v1" != "$CALC_TRAIN_v1" ]; then echo "train-v1.1.json is corrupted! md5sum doesn't match" fi if [ "$EXP_DEV_v1" != "$CALC_DEV_v1" ]; then echo "dev-v1.1.json is corrupted! md5sum doesn't match" fi if [ "$EXP_EVAL_v1" != "$CALC_EVAL_v1" ]; then echo "evaluate-v1.1.py is corrupted! md5sum doesn't match" fi if [ "$EXP_TRAIN_v2" != "$CALC_TRAIN_v2" ]; then echo "train-v2.0.json is corrupted! md5sum doesn't match" fi if [ "$EXP_DEV_v2" != "$CALC_DEV_v2" ]; then echo "dev-v2.0.json is corrupted! md5sum doesn't match" fi if [ "$EXP_EVAL_v2" != "$CALC_EVAL_v2" ]; then echo "evaluate-v2.0.py is corrupted! md5sum doesn't match" fi echo "Complete!"
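For reference, the integrity check the script performs with md5sum can be sketched in Python; the expected digest below is copied from the script, while the local file path is an assumption:

```python
# Hedged sketch of the md5 verification step from squad_download.sh.
import hashlib

def md5_of(path, chunk_size=1 << 20):
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            digest.update(block)
    return digest.hexdigest()

EXPECTED_TRAIN_V1 = "981b29407e0affa3b1b156f72073b945"  # from the script above
if md5_of("v1.1/train-v1.1.json") != EXPECTED_TRAIN_V1:
    print("train-v1.1.json is corrupted! md5sum doesn't match")
```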
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/tabular
tabular
__init__
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # flake8: noqa from .tabular_metrics import TabularMetrics from .utils import load_data
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/scripts/docker
docker
interactive
#!/usr/bin/env bash # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. DATASET_PATH=${1:-"/data/"} NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:=0} docker run -it --rm \ --runtime=nvidia \ -e NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES} \ --net=host \ --shm-size=1g \ --ulimit memlock=-1 \ --ulimit stack=67108864 \ --ipc=host \ -e WORKDIR="$(pwd)" \ -e PYTHONPATH="$(pwd)" \ -v ${DATASET_PATH}/processed/:"$(pwd)"/datasets/ \ -v "$(pwd)":"$(pwd)" \ -v /var/run/docker.sock:/var/run/docker.sock \ -w "$(pwd)" \ tft:latest bash
TensorFlow/LanguageModeling/BERT/biobert
biobert
re_eval
import os import numpy as np import pandas as pd import sklearn.metrics import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--output_path', type=str, help='') parser.add_argument('--answer_path', type=str, help='') parser.add_argument('--task', type=str, default="binary", help='default:binary, possible other options:{chemprot}') args = parser.parse_args() testdf = pd.read_csv(args.answer_path, sep="\t", header=None) preddf = pd.read_csv(args.output_path, sep="\t", header=None) # binary if args.task == "binary": pred = [preddf.iloc[i].tolist() for i in preddf.index] pred_class = [np.argmax(v) for v in pred] pred_prob_one = [v[1] for v in pred] p,r,f,s = sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=testdf["label"]) results = dict() results["f1 score"] = f[1] results["recall"] = r[1] results["precision"] = p[1] results["specificity"] = r[0] # chemprot # micro-average of 5 target classes # see "Potent pairing: ensemble of long short-term memory networks and support vector machine for chemical-protein relation extraction (Mehryary, 2018)" for details if args.task == "chemprot": pred = [preddf.iloc[i].tolist() for i in preddf.index] pred_class = [np.argmax(v) for v in pred] str_to_int_mapper = dict() testdf.iloc[:,3] = testdf.iloc[:, 3].fillna("False") for i,v in enumerate(sorted(testdf.iloc[:,3].unique())): str_to_int_mapper[v] = i test_answer = [str_to_int_mapper[v] for v in testdf.iloc[:,3]] p,r,f,s = sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=test_answer, labels=[0,1,2,3,4], average="micro") results = dict() results["f1 score"] = f results["recall"] = r results["precision"] = p for k,v in results.items(): print("{:11s} : {:.2%}".format(k,v))
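To make the micro-averaged chemprot scoring above concrete, here is a toy example with made-up labels and predictions; micro averaging pools true/false positives across the five target classes before computing a single precision/recall/F1:

```python
# Toy illustration of micro-averaged P/R/F1 over five classes (labels are made up).
import sklearn.metrics

y_true = [0, 1, 2, 3, 4, 0, 1]
y_pred = [0, 1, 2, 4, 4, 1, 1]
p, r, f, _ = sklearn.metrics.precision_recall_fscore_support(
    y_pred=y_pred, y_true=y_true, labels=[0, 1, 2, 3, 4], average="micro")
print("{:11s} : {:.2%}".format("f1 score", f))
```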
PyTorch/Detection/Efficientdet/scripts
scripts
process_checkpoint_for_eval
#!/bin/bash python utils/checkpoint_processing.py --checkpoint_path /checkpoints/model_best.pth.tar --state_dict_path /checkpoints/Effdet_B0.pth
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/bermuda
bermuda
pyt
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from collections import Counter from pathlib import Path from typing import Dict, Iterable, NamedTuple, Optional, Union import torch # pytype: disable=import-error import yaml from ..core import ( GET_MODEL_FN_NAME, BaseConverter, BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec, load_from_file, ) from ..extensions import converters, loaders, runners, savers from .utils import get_dynamic_axes, get_input_shapes, get_shapes_with_dynamic_axes LOGGER = logging.getLogger(__name__) class InputOutputSpec(NamedTuple): inputs: Dict[str, TensorSpec] outputs: Dict[str, TensorSpec] def get_sample_input(dataloader, device): for batch in dataloader: _, x, _ = batch break if isinstance(x, dict): sample_input = list(x.values()) elif isinstance(x, list): sample_input = x else: raise TypeError("The first element (x) of batch returned by dataloader must be a list or a dict") for idx, s in enumerate(sample_input): sample_input[idx] = torch.from_numpy(s).to(device) return tuple(sample_input) def get_model_device(torch_model): if next(torch_model.parameters()).is_cuda: return "cuda" else: return "cpu" def infer_model_precision(model): counter = Counter() for param in model.parameters(): counter[param.dtype] += 1 if counter[torch.float16] > 0: return Precision.FP16 else: return Precision.FP32 def _get_tensor_dtypes(dataloader, precision): def _get_dtypes(t): dtypes = {} for k, v in t.items(): dtype = str(v.dtype) if dtype == "float64": dtype = "float32" if precision == Precision.FP16 and dtype == "float32": dtype = "float16" dtypes[k] = dtype return dtypes input_dtypes = {} output_dtypes = {} for batch in dataloader: _, x, y = batch input_dtypes = _get_dtypes(x) output_dtypes = _get_dtypes(y) break return input_dtypes, output_dtypes ### TODO assumption: floating point input ### type has same precision as the model def _get_io_spec(model, dataloader_fn): precision = model.precision dataloader = dataloader_fn() input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision) input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader) inputs = { name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in model.inputs } outputs = { name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name])) for name in model.outputs } return InputOutputSpec(inputs, outputs) class PyTorchModelLoader(BaseLoader): required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME def __init__(self, **kwargs): self._model_args = kwargs def load(self, model_path: Union[str, Path], **_) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME) model, tensor_infos = get_model(**self._model_args) io_spec = InputOutputSpec(tensor_infos["inputs"], tensor_infos["outputs"]) precision = 
infer_model_precision(model) return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class TorchScriptLoader(BaseLoader): def __init__(self, tensor_names_path: str = None, **kwargs): self._model_args = kwargs self._io_spec = None if tensor_names_path is not None: with Path(tensor_names_path).open("r") as fh: tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader) self._io_spec = InputOutputSpec(tensor_infos["inputs"], tensor_infos["outputs"]) def load(self, model_path: Union[str, Path], **_) -> Model: if not isinstance(model_path, Path): model_path = Path(model_path) model = torch.jit.load(model_path.as_posix()) precision = infer_model_precision(model) io_spec = self._io_spec if not io_spec: yaml_path = model_path.parent / f"{model_path.stem}.yaml" if not yaml_path.is_file(): raise ValueError( f"If `--tensor-names-path is not provided, " f"TorchScript model loader expects file {yaml_path} with tensor information." ) with yaml_path.open("r") as fh: tensor_info = yaml.load(fh, Loader=yaml.SafeLoader) io_spec = InputOutputSpec(tensor_info["inputs"], tensor_info["outputs"]) return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class TorchScriptTraceConverter(BaseConverter): def __init__(self): pass def convert(self, model: Model, dataloader_fn) -> Model: device = get_model_device(model.handle) dummy_input = get_sample_input(dataloader_fn(), device) converted_model = torch.jit.trace_module(model.handle, {"forward": dummy_input}) io_spec = _get_io_spec(model, dataloader_fn) return Model(converted_model, precision=model.precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class TorchScriptScriptConverter(BaseConverter): def __init__(self): pass def convert(self, model: Model, dataloader_fn) -> Model: converted_model = torch.jit.script(model.handle) io_spec = _get_io_spec(model, dataloader_fn) return Model(converted_model, precision=model.precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class PYT2ONNXConverter(BaseConverter): def __init__(self, onnx_opset: int = None): self._onnx_opset = onnx_opset def convert(self, model: Model, dataloader_fn) -> Model: import tempfile import onnx # pytype: disable=import-error assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance( model.handle, torch.nn.Module ), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted." 
dynamic_axes = get_dynamic_axes(dataloader_fn()) device = get_model_device(model.handle) dummy_input = get_sample_input(dataloader_fn(), device) with tempfile.TemporaryDirectory() as tmpdirname: export_path = os.path.join(tmpdirname, "model.onnx") with torch.no_grad(): torch.onnx.export( model.handle, dummy_input, export_path, do_constant_folding=True, input_names=list(model.inputs), output_names=list(model.outputs), dynamic_axes=dynamic_axes, opset_version=self._onnx_opset, enable_onnx_checker=True, ) onnx_model = onnx.load(export_path) onnx.checker.check_model(onnx_model) onnx.helper.strip_doc_string(onnx_model) onnx_model = onnx.shape_inference.infer_shapes(onnx_model) return Model( handle=onnx_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs, ) class PYT2TensorRTConverter(BaseConverter): def __init__(self, max_batch_size: int, max_workspace_size: int, onnx_opset: int, precision: str): self._max_batch_size = max_batch_size self._max_workspace_size = max_workspace_size self._onnx_opset = onnx_opset self._precision = Precision(precision) def convert(self, model: Model, dataloader_fn) -> Model: from .onnx import _infer_graph_precision from .onnx2trt_conv import onnx2trt pyt2onnx_converter = PYT2ONNXConverter(self._onnx_opset) onnx_model = pyt2onnx_converter.convert(model, dataloader_fn).handle precision = _infer_graph_precision(onnx_model.graph) input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size) cuda_engine = onnx2trt( onnx_model, shapes=input_shapes, max_workspace_size=self._max_workspace_size, max_batch_size=self._max_batch_size, model_precision=self._precision.value, ) return Model( handle=cuda_engine, precision=model.precision, inputs=model.inputs, outputs=model.outputs, ) @staticmethod def required_source_model_precision(requested_model_precision: Precision) -> Precision: # TensorRT requires source models to be in FP32 precision return Precision.FP32 class TorchScriptSaver(BaseSaver): def save(self, model: Model, model_path: Union[str, Path]) -> None: if not isinstance(model_path, Path): model_path = Path(model_path) if isinstance(model.handle, torch.jit.ScriptModule): torch.jit.save(model.handle, model_path.as_posix()) else: print("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.") assert False # temporary error handling def _format_tensor_spec(tensor_spec): # wrapping shape with list and whole tensor_spec with dict() is required for correct yaml dump tensor_spec = tensor_spec._replace(shape=list(tensor_spec.shape)) tensor_spec = dict(tensor_spec._asdict()) return tensor_spec # store TensorSpecs from inputs and outputs in a yaml file tensor_specs = { "inputs": {k: _format_tensor_spec(v) for k, v in model.inputs.items()}, "outputs": {k: _format_tensor_spec(v) for k, v in model.outputs.items()}, } yaml_path = model_path.parent / f"{model_path.stem}.yaml" with Path(yaml_path).open("w") as fh: yaml.dump(tensor_specs, fh, indent=4) class PyTorchRunner(BaseRunner): def __init__(self): pass def init_inference(self, model: Model): return PyTorchRunnerSession(model=model) class PyTorchRunnerSession(BaseRunnerSession): def __init__(self, model: Model): super().__init__(model) assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance( model.handle, torch.nn.Module ), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted." 
self._model = model self._output_names = None def __enter__(self): self._output_names = list(self._model.outputs) return self def __exit__(self, exc_type, exc_value, traceback): self._output_names = None self._model = None def __call__(self, x: Dict[str, object]): with torch.no_grad(): feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()] y_pred = self._model.handle(*feed_list) if isinstance(y_pred, torch.Tensor): y_pred = (y_pred,) y_pred = [t.cpu().numpy() for t in y_pred] y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.PYT.value, PyTorchModelLoader) loaders.register_extension(Format.TS_TRACE.value, TorchScriptLoader) loaders.register_extension(Format.TS_SCRIPT.value, TorchScriptLoader) converters.register_extension(f"{Format.PYT.value}--{Format.TS_SCRIPT.value}", TorchScriptScriptConverter) converters.register_extension(f"{Format.PYT.value}--{Format.TS_TRACE.value}", TorchScriptTraceConverter) converters.register_extension(f"{Format.PYT.value}--{Format.ONNX.value}", PYT2ONNXConverter) converters.register_extension(f"{Format.PYT.value}--{Format.TRT.value}", PYT2TensorRTConverter) savers.register_extension(Format.TS_SCRIPT.value, TorchScriptSaver) savers.register_extension(Format.TS_TRACE.value, TorchScriptSaver) runners.register_extension(Format.PYT.value, PyTorchRunner) runners.register_extension(Format.TS_SCRIPT.value, PyTorchRunner) runners.register_extension(Format.TS_TRACE.value, PyTorchRunner)
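A hedged usage sketch of the loader/runner pair defined above; the model file name and input tensor name are placeholders, and only the load → init_inference → call flow is taken from this module:

```python
# Hypothetical end-to-end use of PyTorchModelLoader and PyTorchRunner
# (the model file name and input tensor name are assumptions).
import numpy as np

loader = PyTorchModelLoader()                  # extra kwargs are forwarded to get_model()
model = loader.load("model.py")                # a module exposing get_model()
runner = PyTorchRunner()
with runner.init_inference(model=model) as session:
    outputs = session({"INPUT__0": np.zeros((1, 4, 64, 64), dtype=np.float32)})
    print(list(outputs.keys()))
```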
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/layers
layers
__init__
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Layers package definition.""" from official.nlp.modeling.layers.attention import * # pylint: disable=wildcard-import from official.nlp.modeling.layers.dense_einsum import DenseEinsum from official.nlp.modeling.layers.masked_softmax import MaskedSoftmax from official.nlp.modeling.layers.on_device_embedding import OnDeviceEmbedding from official.nlp.modeling.layers.position_embedding import PositionEmbedding from official.nlp.modeling.layers.self_attention_mask import SelfAttentionMask from official.nlp.modeling.layers.transformer import Transformer from official.nlp.modeling.layers.transformer_scaffold import TransformerScaffold
PyTorch/SpeechRecognition/wav2vec2/common
common
tb_dllogger
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import atexit import glob import os import re from pathlib import Path import numpy as np import torch from torch.utils.tensorboard import SummaryWriter import dllogger tb_loggers = {} class TBLogger: """ xyz_dummies: stretch the screen with empty plots so the legend would always fit for other plots """ def __init__(self, enabled, log_dir, name, interval=1, dummies=True): self.enabled = enabled self.interval = interval self.cache = {} if self.enabled: self.summary_writer = SummaryWriter( log_dir=os.path.join(log_dir, name), flush_secs=120, max_queue=200) atexit.register(self.summary_writer.close) if dummies: for key in ('_', '✕'): self.summary_writer.add_scalar(key, 0.0, 1) def log(self, step, data): for k, v in data.items(): self.log_value(step, k, v.item() if type(v) is torch.Tensor else v) def log_value(self, step, key, val, stat='mean'): if self.enabled: if key not in self.cache: self.cache[key] = [] self.cache[key].append(val) if len(self.cache[key]) == self.interval: agg_val = getattr(np, stat)(self.cache[key]) self.summary_writer.add_scalar(key, agg_val, step) del self.cache[key] def log_grads(self, step, model): if self.enabled: norms = [p.grad.norm().item() for p in model.parameters() if p.grad is not None] for stat in ('max', 'min', 'mean'): self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms), stat=stat) def unique_log_fpath(fpath): """Have a unique log filename for every separate run""" log_num = max([0] + [int(re.search("\.(\d+)", Path(f).suffix).group(1)) for f in glob.glob(f"{fpath}.*")]) return f"{fpath}.{log_num + 1}" def stdout_step_format(step): if isinstance(step, str): return step fields = [] if len(step) > 0: fields.append("epoch {:>4}".format(step[0])) if len(step) > 1: fields.append("iter {:>3}".format(step[1])) if len(step) > 2: fields[-1] += "/{}".format(step[2]) return " | ".join(fields) def stdout_metric_format(metric, metadata, value): name = metadata.get("name", metric + " : ") unit = metadata.get("unit", None) fmt = f'{{{metadata.get("format", "")}}}' fields = [name, fmt.format(value) if value is not None else value, unit] fields = [f for f in fields if f is not None] return "| " + " ".join(fields) def log(when, metrics={}, scope='train', flush_log=False, tb_iter=None): dllogger.log(when, data=metrics.get_metrics(scope, 'dll')) if tb_iter is not None: tb_loggers[scope].log(tb_iter, metrics.get_metrics(scope, 'tb')) if flush_log: flush() def log_grads_tb(tb_total_steps, grads, tb_subset='train'): tb_loggers[tb_subset].log_grads(tb_total_steps, grads) def log_parameters(data, verbosity=0, tb_subset=None): for k, v in data.items(): v = str(v) if isinstance(v, Path) else v dllogger.log(step="PARAMETER", data={k: v}, verbosity=verbosity) if tb_subset is not None and tb_loggers[tb_subset].enabled: tb_data = {k: v for k, v in data.items() if type(v) in (str, bool, int, float)} tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {}) def flush(): 
dllogger.flush() for tbl in tb_loggers.values(): if tbl.enabled: tbl.summary_writer.flush()
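A short usage sketch of the TBLogger class above; the log directory and metric values are placeholders. Values are cached per key and written to TensorBoard every `interval` calls:

```python
# Illustrative TBLogger usage; paths and values are placeholders.
tb = TBLogger(enabled=True, log_dir="./results/tb", name="train", interval=10)
for step in range(100):
    tb.log_value(step, "loss", 1.0 / (step + 1))   # aggregated with np.mean by default
    tb.log_value(step, "lr", 1e-4, stat="max")     # any numpy reduction name works
```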
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/optim
optim
fused_adam
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import types import torch def get_fused_adam_class(): """ Look for the FusedAdam optimizer from apex. We first try to load the "contrib" interface, which is a bit faster than the main interface, but is technically deprecated. """ try: # The "deprecated" interface in recent versions of apex is a bit # faster than the main interface, since we don't use the apex # optimizer. This can be installed by passing the # `--deprecated_fused_adam` option when building apex. global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") return FusedAdamV1 except ImportError: try: # fallback to the newer interface from apex.optimizers import FusedAdam as _FusedAdam # noqa from apex.multi_tensor_apply import multi_tensor_applier if multi_tensor_applier.available: return FusedAdamV2 except ImportError: pass return None class FusedAdamV1(torch.optim.Optimizer): """ Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via ``python setup.py install --cuda_ext --cpp_ext``. It has been proposed in `Adam: A Method for Stochastic Optimization`_. Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in FusedAdam! eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) .. _Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. 
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0.0, max_grad_norm=0.0, amsgrad=False, ): global fused_adam_cuda import importlib fused_adam_cuda = importlib.import_module("fused_adam_cuda") if amsgrad: raise RuntimeError("FusedAdam does not support the AMSGrad variant.") defaults = { "lr": lr, "bias_correction": bias_correction, "betas": betas, "eps": eps, "weight_decay": weight_decay, "max_grad_norm": max_grad_norm, } super().__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True @property def supports_step_with_scale(self): return True def step(self, closure=None, grads=None, scale=1.0, grad_norms=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) output params (list of tensors, optional): A reduced precision copy of the updated weights written out in addition to the regular updated weights. Have to be of same type as gradients. (default: None) scale (float, optional): factor to divide gradient tensor values by before applying to weights. (default: 1) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None] * len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0]) != list: grads_group = [grads] else: grads_group = grads if grad_norms is None: grad_norms = [None] * len(self.param_groups) for group, grads_this_group, grad_norm in zip( self.param_groups, grads_group, grad_norms ): if grads_this_group is None: grads_this_group = [None] * len(group["params"]) # compute combined scale factor for this group combined_scale = scale if group.get("max_grad_norm", 0) > 0: # norm is in fact norm*scale clip = ((grad_norm / scale) + 1e-6) / group["max_grad_norm"] if clip > 1: combined_scale = clip * scale bias_correction = 1 if group.get("bias_correction", 1) else 0 for p, grad in zip(group["params"], grads_this_group): # note: p.grad should not ever be set for correct # operation of mixed precision optimizer that sometimes # sends None gradients if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError( "FusedAdam does not support sparse gradients, " "please consider SparseAdam instead" ) p_data_fp32 = p.data.float() state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p_data_fp32) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p_data_fp32) else: state["exp_avg"] = state["exp_avg"].to(p_data_fp32) state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32) exp_avg = state["exp_avg"] exp_avg_sq = state["exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 out_p = p.data with torch.cuda.device(p.device): fused_adam_cuda.adam( p_data_fp32, out_p, exp_avg, exp_avg_sq, grad, group["lr"], beta1, beta2, group["eps"], combined_scale, 
state["step"], self.eps_mode, bias_correction, group["weight_decay"], ) return loss try: from apex.optimizers import FusedAdam from apex.multi_tensor_apply import multi_tensor_applier class FusedAdamV2(FusedAdam): """ Compared to the original version in Apex, the fairseq version casts grads and params to FP32 internally to support ``--memory-efficient-fp16``. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if not hasattr(self, "multi_tensor_adam"): raise Exception( "Apex installation is outdated. Please install an updated version of apex." ) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step( self, closure=None, grads=None, output_params=None, scale=None, grad_norms=None, ): """Performs a single optimization step.""" loss = None if closure is not None: loss = closure() for group in self.param_groups: bias_correction = 1 if group["bias_correction"] else 0 beta1, beta2 = group["betas"] # assume same step across group now to simplify things # per parameter step can be easily support by making it tensor, or pass list into kernel if "step" in group: group["step"] += 1 else: group["step"] = 1 # create lists for multi-tensor apply g_16, p_16, orig_p_16, m_16, v_16 = [], [], [], [], [] g_32, p_32, m_32, v_32 = [], [], [], [] for p in group["params"]: if p.grad is None: continue if p.grad.data.is_sparse: raise RuntimeError( "FusedAdam does not support sparse gradients, " "please consider SparseAdam instead" ) state = self.state[p] # State initialization if len(state) == 0: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p.data, dtype=torch.float) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like( p.data, dtype=torch.float ) else: state["exp_avg"] = state["exp_avg"].to( device=p.data.device, dtype=torch.float ) state["exp_avg_sq"] = state["exp_avg_sq"].to( device=p.data.device, dtype=torch.float ) if p.dtype == torch.float16: g_16.append(p.grad.data.float()) p_16.append(p.data.float()) orig_p_16.append(p.data) m_16.append(state["exp_avg"]) v_16.append(state["exp_avg_sq"]) elif p.dtype == torch.float32: g_32.append(p.grad.data) p_32.append(p.data) m_32.append(state["exp_avg"]) v_32.append(state["exp_avg_sq"]) else: raise RuntimeError("FusedAdam only support fp16 and fp32.") with torch.cuda.device(p.device): if len(g_16) > 0: multi_tensor_applier( self.multi_tensor_adam, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16], group["lr"], beta1, beta2, group["eps"], group["step"], self.adam_w_mode, bias_correction, group["weight_decay"], ) for orig_p, p in zip(orig_p_16, p_16): orig_p.copy_(p.data) if len(g_32) > 0: multi_tensor_applier( self.multi_tensor_adam, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32], group["lr"], beta1, beta2, group["eps"], group["step"], self.adam_w_mode, bias_correction, group["weight_decay"], ) return loss except ImportError: pass
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/evaluator
evaluator
ctlevaluator
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. _target_: evaluators.evaluator.CTLMetricEvaluator config: batch_size: 1024 output_selector: 0 save_predictions: false metrics: - MAE - RMSE - SMAPE - ND
PyTorch/SpeechRecognition/wav2vec2/common/fairseq
fairseq
tokenizer
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re SPACE_NORMALIZER = re.compile(r"\s+") def tokenize_line(line): line = SPACE_NORMALIZER.sub(" ", line) line = line.strip() return line.split()
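For clarity, the whitespace normalization performed by tokenize_line:

```python
# tokenize_line collapses runs of whitespace, strips the ends, and splits on spaces.
print(tokenize_line("  hello \t  world\n"))   # ['hello', 'world']
```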
PyTorch/Classification/GPUNet/triton/225ms-D/runner
runner
config_NVIDIA-DGX-A100-(1x-A100-80GB)
batching: dynamic checkpoints: - name: 2.25ms-D url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_d2_pyt_ckpt/versions/21.12.0_amp/zip configurations: - checkpoint: 2.25ms-D parameters: backend_accelerator: trt checkpoint: 2.25ms-D device_kind: gpu export_format: onnx export_precision: fp16 format: onnx max_batch_size: 64 number_of_model_instances: 2 precision: fp16 tensorrt_capture_cuda_graph: 0 torch_jit: none container_version: '21.12' datasets: - name: imagenet datasets_dir: datasets ensemble_model_name: null framework: PyTorch measurement_steps_offline: 8 measurement_steps_online: 32 model_name: GPUnet performance_tool: model_analyzer triton_container_image: nvcr.io/nvidia/tritonserver:21.12-py3 triton_custom_operations: null triton_dockerfile: null triton_load_model_method: explicit
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/evaluation/voc
voc
__init__
import logging from .voc_eval import do_voc_evaluation def voc_evaluation(dataset, predictions, output_folder, box_only, **_): logger = logging.getLogger("maskrcnn_benchmark.inference") if box_only: logger.warning("voc evaluation doesn't support box_only, ignored.") logger.info("performing voc evaluation, ignored iou_types.") return do_voc_evaluation( dataset=dataset, predictions=predictions, output_folder=output_folder, logger=logger, )
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/layers
layers
lstm
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_LSTM_H #define TT2I_LSTM_H #include "layerData.h" namespace nvinfer1 { class INetworkDefinition; class ITensor; class ILayer; } // namespace nvinfer1 namespace tts { class LSTM { public: /** * @brief Add a new bidirection LSTM layer to the network with padding at the * end of the sequence, and with a number of * hidden layers equal to half the number of output layers. * * @param network The network to add to. * @param input The input tensor. * @param inputLength The length of each input sequence. * @param numDimensions The number of output dimensions of the LSTM. * @param lstmData The LSTM weights (must be in * scope until the network is finished building). * @param name The name to prefix the layers with. * * @return The last of the newly added layrs. */ static nvinfer1::ILayer* addPaddedBidirectional(nvinfer1::INetworkDefinition* network, nvinfer1::ITensor* input, nvinfer1::ITensor* inputLength, int numDimensions, const LayerData& lstmData); /** * @brief Add a new unidirection LSTM layer to the network, with a number of * hidden layers equal to half the number of output layers. * * @param network The network to add to. * @param input The input tensor. * @param input The input hidden states. * @param input The input cell states. * @param numDimensions The number of output dimensions of the LSTM. * @param lstmData The LSTM weights (must be in * scope until the network is finished building). * * @return The last of the newly added layrs. */ static nvinfer1::ILayer* addUnidirectionalCell(nvinfer1::INetworkDefinition* network, nvinfer1::ITensor* input, nvinfer1::ITensor* hiddenStatesIn, nvinfer1::ITensor* cellStatesIn, int numDimensions, const LayerData& lstmData); }; } // namespace tts #endif
PyTorch/Classification/GPUNet/configs/batch1/GV100
GV100
0.5ms-D
[ { "layer_type": "data", "img_resolution": 224, "distill": false }, { "layer_type": "head", "num_in_channels": 3, "num_out_channels": 32, "act": "relu" }, { "layer_type": "conv", "num_in_channels": 32, "num_out_channels": 32, "stride": 1, "kernel_size": 3, "act": "relu", "stage": 1 }, { "layer_type": "conv", "num_in_channels": 32, "num_out_channels": 32, "stride": 1, "kernel_size": 3, "act": "relu", "stage": 1 }, { "layer_type": "conv", "num_in_channels": 32, "num_out_channels": 64, "stride": 2, "kernel_size": 3, "act": "relu", "stage": 2 }, { "layer_type": "irb", "num_in_channels": 64, "num_out_channels": 96, "stride": 2, "expansion": 8, "kernel_size": 3, "act": "relu", "use_se": false, "stage": 3 }, { "layer_type": "irb", "num_in_channels": 96, "num_out_channels": 96, "stride": 1, "expansion": 7.67, "kernel_size": 3, "act": "relu", "use_se": false, "stage": 3 }, { "layer_type": "irb", "num_in_channels": 96, "num_out_channels": 96, "stride": 1, "expansion": 7.67, "kernel_size": 3, "act": "relu", "use_se": false, "stage": 3 }, { "layer_type": "irb", "num_in_channels": 96, "num_out_channels": 256, "stride": 2, "expansion": 7.67, "kernel_size": 3, "act": "relu", "use_se": false, "stage": 4 }, { "layer_type": "irb", "num_in_channels": 256, "num_out_channels": 256, "stride": 1, "expansion": 4.25, "kernel_size": 5, "act": "relu", "use_se": false, "stage": 4 }, { "layer_type": "irb", "num_in_channels": 256, "num_out_channels": 256, "stride": 1, "expansion": 4.75, "kernel_size": 3, "act": "relu", "use_se": false, "stage": 4 }, { "layer_type": "irb", "num_in_channels": 256, "num_out_channels": 704, "stride": 2, "expansion": 8, "kernel_size": 3, "act": "relu", "use_se": false, "stage": 5 }, { "layer_type": "irb", "num_in_channels": 704, "num_out_channels": 704, "stride": 1, "expansion": 3.82, "kernel_size": 5, "act": "relu", "use_se": false, "stage": 5 }, { "layer_type": "irb", "num_in_channels": 704, "num_out_channels": 704, "stride": 1, "expansion": 3.36, "kernel_size": 3, "act": "relu", "use_se": false, "stage": 5 }, { "layer_type": "irb", "num_in_channels": 704, "num_out_channels": 704, "stride": 1, "expansion": 2.55, "kernel_size": 3, "act": "relu", "use_se": false, "stage": 5 }, { "layer_type": "irb", "num_in_channels": 704, "num_out_channels": 704, "stride": 2, "expansion": 5.73, "kernel_size": 3, "act": "relu", "use_se": false, "stage": 6 }, { "layer_type": "tail", "num_in_channels": 704, "num_out_channels": 1984, "num_classes": 1000 } ]
PyTorch/Segmentation/MaskRCNN/pytorch/configs/gn_baselines
gn_baselines
e2e_faster_rcnn_R_50_FPN_Xconv1fc_1x_gn
INPUT: MIN_SIZE_TRAIN: 800 MAX_SIZE_TRAIN: 1333 MIN_SIZE_TEST: 800 MAX_SIZE_TEST: 1333 MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50-GN" BACKBONE: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 RESNETS: # use GN for backbone TRANS_FUNC: "BottleneckWithGN" STEM_FUNC: "StemWithGN" FPN: USE_GN: True # use GN for FPN RPN: USE_FPN: True ANCHOR_STRIDE: (4, 8, 16, 32, 64) PRE_NMS_TOP_N_TRAIN: 2000 PRE_NMS_TOP_N_TEST: 1000 POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TEST: 1000 ROI_HEADS: USE_FPN: True BATCH_SIZE_PER_IMAGE: 512 POSITIVE_FRACTION: 0.25 ROI_BOX_HEAD: USE_GN: True # use GN for bbox head POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 CONV_HEAD_DIM: 256 NUM_STACKED_CONVS: 4 FEATURE_EXTRACTOR: "FPNXconv1fcFeatureExtractor" PREDICTOR: "FPNPredictor" DATASETS: TRAIN: ("coco_2014_train", "coco_2014_valminusminival") TEST: ("coco_2014_minival",) DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: # Assume 8 gpus BASE_LR: 0.02 WEIGHT_DECAY: 0.0001 STEPS: (60000, 80000) MAX_ITER: 90000 IMS_PER_BATCH: 16 TEST: IMS_PER_BATCH: 8
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
learning_schedules_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.utils.learning_schedules.""" import numpy as np import tensorflow as tf from object_detection.utils import learning_schedules from object_detection.utils import test_case class LearningSchedulesTest(test_case.TestCase): def testExponentialDecayWithBurnin(self): def graph_fn(global_step): learning_rate_base = 1.0 learning_rate_decay_steps = 3 learning_rate_decay_factor = .1 burnin_learning_rate = .5 burnin_steps = 2 min_learning_rate = .05 learning_rate = learning_schedules.exponential_decay_with_burnin( global_step, learning_rate_base, learning_rate_decay_steps, learning_rate_decay_factor, burnin_learning_rate, burnin_steps, min_learning_rate) assert learning_rate.op.name.endswith('learning_rate') return (learning_rate,) output_rates = [ self.execute(graph_fn, [np.array(i).astype(np.int64)]) for i in range(9) ] exp_rates = [.5, .5, 1, 1, 1, .1, .1, .1, .05] self.assertAllClose(output_rates, exp_rates, rtol=1e-4) def testCosineDecayWithWarmup(self): def graph_fn(global_step): learning_rate_base = 1.0 total_steps = 100 warmup_learning_rate = 0.1 warmup_steps = 9 learning_rate = learning_schedules.cosine_decay_with_warmup( global_step, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps) assert learning_rate.op.name.endswith('learning_rate') return (learning_rate,) exp_rates = [0.1, 0.5, 0.9, 1.0, 0] input_global_steps = [0, 4, 8, 9, 100] output_rates = [ self.execute(graph_fn, [np.array(step).astype(np.int64)]) for step in input_global_steps ] self.assertAllClose(output_rates, exp_rates) def testCosineDecayAfterTotalSteps(self): def graph_fn(global_step): learning_rate_base = 1.0 total_steps = 100 warmup_learning_rate = 0.1 warmup_steps = 9 learning_rate = learning_schedules.cosine_decay_with_warmup( global_step, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps) assert learning_rate.op.name.endswith('learning_rate') return (learning_rate,) exp_rates = [0] input_global_steps = [101] output_rates = [ self.execute(graph_fn, [np.array(step).astype(np.int64)]) for step in input_global_steps ] self.assertAllClose(output_rates, exp_rates) def testCosineDecayWithHoldBaseLearningRateSteps(self): def graph_fn(global_step): learning_rate_base = 1.0 total_steps = 120 warmup_learning_rate = 0.1 warmup_steps = 9 hold_base_rate_steps = 20 learning_rate = learning_schedules.cosine_decay_with_warmup( global_step, learning_rate_base, total_steps, warmup_learning_rate, warmup_steps, hold_base_rate_steps) assert learning_rate.op.name.endswith('learning_rate') return (learning_rate,) exp_rates = [0.1, 0.5, 0.9, 1.0, 1.0, 1.0, 0.999702, 0.874255, 0.577365, 0.0] input_global_steps = [0, 4, 8, 9, 10, 29, 30, 50, 70, 120] output_rates = [ self.execute(graph_fn, [np.array(step).astype(np.int64)]) for step in input_global_steps ] self.assertAllClose(output_rates, exp_rates) def 
testManualStepping(self): def graph_fn(global_step): boundaries = [2, 3, 7] rates = [1.0, 2.0, 3.0, 4.0] learning_rate = learning_schedules.manual_stepping( global_step, boundaries, rates) assert learning_rate.op.name.endswith('learning_rate') return (learning_rate,) output_rates = [ self.execute(graph_fn, [np.array(i).astype(np.int64)]) for i in range(10) ] exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0] self.assertAllClose(output_rates, exp_rates) def testManualSteppingWithWarmup(self): def graph_fn(global_step): boundaries = [4, 6, 8] rates = [0.02, 0.10, 0.01, 0.001] learning_rate = learning_schedules.manual_stepping( global_step, boundaries, rates, warmup=True) assert learning_rate.op.name.endswith('learning_rate') return (learning_rate,) output_rates = [ self.execute(graph_fn, [np.array(i).astype(np.int64)]) for i in range(9) ] exp_rates = [0.02, 0.04, 0.06, 0.08, 0.10, 0.10, 0.01, 0.01, 0.001] self.assertAllClose(output_rates, exp_rates) def testManualSteppingWithZeroBoundaries(self): def graph_fn(global_step): boundaries = [] rates = [0.01] learning_rate = learning_schedules.manual_stepping( global_step, boundaries, rates) return (learning_rate,) output_rates = [ self.execute(graph_fn, [np.array(i).astype(np.int64)]) for i in range(4) ] exp_rates = [0.01] * 4 self.assertAllClose(output_rates, exp_rates) if __name__ == '__main__': tf.test.main()
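As a reference for the warmup-then-cosine behaviour checked in testCosineDecayWithWarmup, the schedule can be sketched in plain NumPy; this mirrors the expected test values but is not the library implementation (hold_base_rate_steps is ignored here):

```python
# Reference sketch of linear warmup followed by half-cosine decay to zero.
import numpy as np

def cosine_decay_with_warmup_ref(step, base_lr, total_steps, warmup_lr, warmup_steps):
    if step < warmup_steps:
        return warmup_lr + (base_lr - warmup_lr) / warmup_steps * step
    progress = min((step - warmup_steps) / (total_steps - warmup_steps), 1.0)
    return 0.5 * base_lr * (1.0 + np.cos(np.pi * progress))

print([round(cosine_decay_with_warmup_ref(s, 1.0, 100, 0.1, 9), 3)
       for s in (0, 4, 8, 9, 100)])   # [0.1, 0.5, 0.9, 1.0, 0.0]
```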
TensorFlow/Classification/ConvNets/triton
triton
run_inference_on_fw
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" To infer the model on framework runtime, you can use `run_inference_on_fw.py` script. It infers data obtained from pointed data loader locally and saves received data into npz files. Those files are stored in directory pointed by `--output-dir` argument. Example call: ```shell script python ./triton/run_inference_on_fw.py \ --input-path /models/exported/model.onnx \ --input-type onnx \ --dataloader triton/dataloader.py \ --data-dir /data/imagenet \ --batch-size 32 \ --output-dir /results/dump_local \ --dump-labels ``` """ import argparse import logging import os from pathlib import Path os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0" from tqdm import tqdm # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import DATALOADER_FN_NAME, BaseLoader, BaseRunner, Format, load_from_file from .deployment_toolkit.dump import JsonDumpWriter from .deployment_toolkit.extensions import loaders, runners LOGGER = logging.getLogger("run_inference_on_fw") def _verify_and_format_dump(args, ids, x, y_pred, y_real): data = {"outputs": y_pred, "ids": {"ids": ids}} if args.dump_inputs: data["inputs"] = x if args.dump_labels: if not y_real: raise ValueError( "Found empty label values. 
Please provide labels in dataloader_fn or do not use --dump-labels argument" ) data["labels"] = y_real return data def _parse_and_validate_args(): supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions) parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False) parser.add_argument("--input-path", help="Path to input model", required=True) parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True) parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True) parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True) parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False) parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) args, *_ = parser.parse_known_args() get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) Loader: BaseLoader = loaders.get(args.input_type) ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser) Runner: BaseRunner = runners.get(args.input_type) ArgParserGenerator(Runner).update_argparser(parser) args = parser.parse_args() types_requiring_io_params = [] if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]): parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters") return args def main(): args = _parse_and_validate_args() log_level = logging.INFO if not args.verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) LOGGER.info(f"args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") Loader: BaseLoader = loaders.get(args.input_type) Runner: BaseRunner = runners.get(args.input_type) loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args) runner = ArgParserGenerator(Runner).from_args(args) LOGGER.info(f"Loading {args.input_path}") model = loader.load(args.input_path) with runner.init_inference(model=model) as runner_session, JsonDumpWriter(args.output_dir) as writer: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) LOGGER.info(f"Data loader initialized; Running inference") for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10): y_pred = runner_session(x) data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real) writer.write(**data) LOGGER.info(f"Inference finished") if __name__ == "__main__": main()
PaddlePaddle/Classification/RN50v1.5
RN50v1.5
train
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from dali import build_dataloader from utils.affinity import set_cpu_affinity from utils.config import parse_args, print_args from utils.logger import setup_dllogger from utils.mode import Mode, RunScope from utils.save_load import init_program, save_model import paddle import program from paddle.distributed import fleet from paddle.static.amp.fp16_lists import AutoMixedPrecisionLists from paddle.static.amp.fp16_utils import cast_model_to_fp16 from paddle.incubate import asp as sparsity class MetricSummary: def __init__(self): super().__init__() self.metric_dict = None def update(self, new_metrics): if not self.is_updated: self.metric_dict = {} for key in new_metrics: if key in self.metric_dict: # top1, top5 and ips are "larger is better" if key in ['top1', 'top5', 'ips']: self.metric_dict[key] = ( new_metrics[key] if new_metrics[key] > self.metric_dict[key] else self.metric_dict[key] ) # Others are "Smaller is better" else: self.metric_dict[key] = ( new_metrics[key] if new_metrics[key] < self.metric_dict[key] else self.metric_dict[key] ) else: self.metric_dict[key] = new_metrics[key] @property def is_updated(self): return self.metric_dict is not None def main(args): """ A enterpoint to train and evaluate a ResNet50 model, which contains six steps. 1. Parse arguments from command line. 2. Initialize distributed training related setting, including CPU affinity. 3. Build dataloader via DALI. 4. Create training and evaluating Paddle.static.Program. 5. Load checkpoint or pretrained model if given. 6. Run program (train and evaluate with datasets, then save model if necessary). 
""" setup_dllogger(args.report_file) if args.show_config: print_args(args) fleet.init(is_collective=True) if args.enable_cpu_affinity: set_cpu_affinity() device = paddle.set_device('gpu') startup_prog = paddle.static.Program() train_dataloader = None train_prog = None optimizer = None if args.run_scope in [RunScope.TRAIN_EVAL, RunScope.TRAIN_ONLY]: train_dataloader = build_dataloader(args, Mode.TRAIN) train_step_each_epoch = len(train_dataloader) train_prog = paddle.static.Program() train_fetchs, lr_scheduler, _, optimizer = program.build( args, train_prog, startup_prog, step_each_epoch=train_step_each_epoch, is_train=True, ) eval_dataloader = None eval_prog = None if args.run_scope in [RunScope.TRAIN_EVAL, RunScope.EVAL_ONLY]: eval_dataloader = build_dataloader(args, Mode.EVAL) eval_step_each_epoch = len(eval_dataloader) eval_prog = paddle.static.Program() eval_fetchs, _, _, _ = program.build( args, eval_prog, startup_prog, step_each_epoch=eval_step_each_epoch, is_train=False, ) # clone to prune some content which is irrelevant in eval_prog eval_prog = eval_prog.clone(for_test=True) exe = paddle.static.Executor(device) exe.run(startup_prog) init_program( args, exe=exe, program=train_prog if train_prog is not None else eval_prog, ) if args.amp: if args.run_scope == RunScope.EVAL_ONLY: cast_model_to_fp16( eval_prog, AutoMixedPrecisionLists(), use_fp16_guard=False, level='O1', ) else: optimizer.amp_init( device, scope=paddle.static.global_scope(), test_program=eval_prog, use_fp16_test=True, ) if args.asp and args.prune_model: logging.info("Pruning model to 2:4 sparse pattern...") sparsity.prune_model(train_prog, mask_algo=args.mask_algo) logging.info("Pruning model done.") if eval_prog is not None: eval_prog = program.compile_prog(args, eval_prog, is_train=False) train_summary = MetricSummary() eval_summary = MetricSummary() for epoch_id in range(args.start_epoch, args.epochs): # Training if train_prog is not None: metric_summary = program.run( args, train_dataloader, exe, train_prog, train_fetchs, epoch_id, Mode.TRAIN, lr_scheduler, ) train_summary.update(metric_summary) # Save a checkpoint if epoch_id % args.save_interval == 0: model_path = os.path.join(args.output_dir, args.model_arch_name) save_model(train_prog, model_path, epoch_id, args.model_prefix) # Evaluation if (eval_prog is not None) and (epoch_id % args.eval_interval == 0): metric_summary = program.run( args, eval_dataloader, exe, eval_prog, eval_fetchs, epoch_id, Mode.EVAL, ) eval_summary.update(metric_summary) if train_summary.is_updated: program.log_info((), train_summary.metric_dict, Mode.TRAIN) if eval_summary.is_updated: program.log_info((), eval_summary.metric_dict, Mode.EVAL) if __name__ == '__main__': paddle.enable_static() main(parse_args())
TensorFlow/Recommendation/NCF
NCF
requirements
pandas
git+https://github.com/NVIDIA/dllogger#egg=dllogger
mpi4py
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/preprocessing/datasets
datasets
__init__
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .cora import CORAPreprocessing from .epinions import EpinionsPreprocessing from .ogbn_mag import OGBN_MAG_Preprocessing from .ogbn_mag240m import MAG240mPreprocessing from .ieee import IEEEPreprocessing from .tabformer import TabFormerPreprocessing DATASETS = { 'cora': CORAPreprocessing, 'epinions': EpinionsPreprocessing, 'ogbn_mag': OGBN_MAG_Preprocessing, 'ogbn_mag240m': MAG240mPreprocessing, 'ieee': IEEEPreprocessing, 'tabformer': TabFormerPreprocessing, }
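A short sketch of how a name-keyed registry such as DATASETS is typically consumed. The helper name and the constructor keyword arguments are assumptions; only the DATASETS mapping itself comes from this file.

```python
# Hypothetical lookup helper around the DATASETS registry above.
from syngen.preprocessing.datasets import DATASETS

def get_preprocessing(dataset_name, **kwargs):
    try:
        preprocessing_cls = DATASETS[dataset_name]
    except KeyError:
        raise ValueError(f"Unknown dataset '{dataset_name}', available: {sorted(DATASETS)}")
    # Constructor arguments are an assumption; see each preprocessing class for its real signature.
    return preprocessing_cls(**kwargs)

# e.g. preprocessing = get_preprocessing('cora')  # plus whatever arguments CORAPreprocessing expects
```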
PyTorch/SpeechSynthesis/Tacotron2/scripts
scripts
train_waveglow
mkdir -p output
python -m multiproc train.py -m WaveGlow -o ./output/ -lr 1e-4 --epochs 1501 -bs 4 --segment-length 8000 --weight-decay 0 --grad-clip-thresh 3.4028234663852886e+38 --cudnn-enabled --cudnn-benchmark --log-file nvlog.json
PyTorch/Translation/Transformer/scripts
scripts
draw_summary
import json import argparse from collections import defaultdict, OrderedDict import matplotlib.pyplot as plt import numpy as np def smooth_moving_average(x, n): fil = np.ones(n)/n smoothed = np.convolve(x, fil, mode='valid') smoothed = np.concatenate((x[:n-1], smoothed), axis=0) return smoothed def moving_stdev(x, n): fil = np.ones(n)/n avg_sqare = np.convolve(np.power(x, 2), fil, mode='valid') squared_avg = np.power(np.convolve(x, fil, mode='valid'), 2) var = avg_sqare - squared_avg stdev = np.sqrt(var) #pad first few values stdev = np.concatenate(([0]*(n-1), stdev), axis=0) return stdev def get_plot(log): steps = [x[0] for x in log if isinstance(x[0], int)] values = [x[2] for x in log if isinstance(x[0], int)] return steps, values def highlight_max_point(plot, color): point = max(zip(*plot), key=lambda x: x[1]) plt.plot(point[0], point[1], 'bo-', color=color) plt.annotate("{:.2f}".format(point[1]), point) return point def main(args): jlog = defaultdict(list) jlog['parameters'] = {} with open(args.log_file, 'r') as f: for line in f.readlines(): line_dict = json.loads(line[5:]) if line_dict['type'] == 'LOG': if line_dict['step'] == 'PARAMETER': jlog['parameters'].update(line_dict['data']) elif line_dict['step'] == [] and 'training_summary' not in jlog: jlog['training_summary']=line_dict['data'] else: for k, v in line_dict['data'].items(): jlog[k].append((line_dict['step'], line_dict['elapsedtime'], v)) fig, ax1 = plt.subplots(figsize=(20,5)) fig.suptitle(args.title, fontsize=16) ax1.set_xlabel('steps') ax1.set_ylabel('loss') # Define colors for specific curves VAL_LOSS_COLOR = 'blue' VAL_BLEU_COLOR = 'red' TEST_BLEU_COLOR = 'pink' # Plot smoothed loss curve steps, loss = get_plot(jlog['loss']) smoothed_loss = smooth_moving_average(loss, 150) stdev = moving_stdev(loss, 150) ax1.plot(steps, smoothed_loss, label='Training loss') ax1.plot(steps, smoothed_loss + stdev, '--', color='orange', linewidth=0.3, label='Stdev') ax1.plot(steps, smoothed_loss - stdev, '--', color='orange', linewidth=0.3) # Plot validation loss curve val_steps, val_loss = get_plot(jlog['val_loss']) ax1.plot(val_steps, val_loss, color='blue', label='Validation loss') min_val_loss_step = val_steps[np.argmin(val_loss)] ax1.axvline(min_val_loss_step, linestyle='dashed', color=VAL_LOSS_COLOR, linewidth=0.5, label='Validation loss minimum') # Plot BLEU curves ax2 = ax1.twinx() ax2.set_ylabel('BLEU') val_steps, val_bleu = get_plot(jlog['val_bleu']) ax2.plot(val_steps, val_bleu, color=VAL_BLEU_COLOR, label='Validation BLEU') mvb_step, _ =highlight_max_point((val_steps,val_bleu), color=VAL_BLEU_COLOR) # values to be labeled on plot max_val_bleu_step = val_steps[np.argmax(val_bleu)] max_val_bleu = val_bleu[val_steps.index(max_val_bleu_step)] min_loss_bleu = val_bleu[val_steps.index(min_val_loss_step)] if 'test_bleu' in jlog: test_steps, test_bleu = get_plot(jlog['test_bleu']) ax2.plot(val_steps, test_bleu, color=TEST_BLEU_COLOR, label='Test BLEU') highlight_max_point((test_steps, test_bleu), color=TEST_BLEU_COLOR) ax2.tick_params(axis='y') # Annotate points with highest BLEU score as well as those for minimal validation loss ax2.plot(min_val_loss_step, min_loss_bleu, 'bo-', color=VAL_BLEU_COLOR) ax2.annotate("{:.2f}".format(min_loss_bleu), (min_val_loss_step, min_loss_bleu)) if 'test_bleu' in jlog: min_loss_test_bleu = test_bleu[val_steps.index(min_val_loss_step)] #BLEU score on test set when validation loss is minimal ax2.plot(min_val_loss_step, min_loss_test_bleu, 'bo-', color=TEST_BLEU_COLOR) 
ax2.annotate("{:.2f}".format(min_loss_test_bleu), (min_val_loss_step, min_loss_test_bleu)) max_val_bleu_test = test_bleu[val_steps.index(max_val_bleu_step)] #BLEU score on test set when BLEU score on dev set is maximal ax2.plot(mvb_step, max_val_bleu_test, 'bo-', color=TEST_BLEU_COLOR) ax2.annotate("{:.2f}".format(max_val_bleu_test), (max_val_bleu_step, max_val_bleu_test)) ax1.legend(loc='lower left', bbox_to_anchor=(1,0)) ax2.legend(loc='upper left', bbox_to_anchor=(1,1)) plt.grid() plt.savefig(args.output) # Produce json with training summary if args.dump_json: summary = OrderedDict() summary['args'] = OrderedDict(jlog['parameters']) summary['min_val_loss'] = min(val_loss) summary['max_val_bleu'] = max(val_bleu) summary['max_test_bleu'] = max(test_bleu) summary['final_values'] = jlog['training_summary'] summary['avg_epoch_loss'] = [x.mean() for x in np.array_split(np.array(loss), jlog['parameters']['max_epoch'])] summary['min_val_loss_step'] = min_val_loss_step json.dump(summary, open(args.dump_json, 'w')) if __name__=='__main__': parser = argparse.ArgumentParser() parser.add_argument('--title', type=str) parser.add_argument('--log-file', type=str) parser.add_argument('--output' ,'-o', type=str) parser.add_argument('--dump-json', '-j', type=str) args = parser.parse_args() main(args)
TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/utils
utils
feature_description
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. DISPLAY_ID_COLUMN = "display_id" BASE_CONT_COLUMNS = [ "publish_time", "publish_time_promo", "timestamp", "document_id_promo_clicked_sum_ctr", "publisher_id_promo_clicked_sum_ctr", "source_id_promo_clicked_sum_ctr", "document_id_promo_count", "publish_time_days_since_published", "ad_id_clicked_sum_ctr", "advertiser_id_clicked_sum_ctr", "campaign_id_clicked_sum_ctr", "ad_id_count", "publish_time_promo_days_since_published", ] SIM_COLUMNS = [ "doc_event_doc_ad_sim_categories", "doc_event_doc_ad_sim_topics", "doc_event_doc_ad_sim_entities", ] CONTINUOUS_COLUMNS = BASE_CONT_COLUMNS + SIM_COLUMNS + [DISPLAY_ID_COLUMN] exclude_conts = ["publish_time", "publish_time_promo", "timestamp"] NUMERIC_COLUMNS = [col for col in CONTINUOUS_COLUMNS if col not in exclude_conts] CATEGORICAL_COLUMNS = [ "ad_id", "document_id", "platform", "document_id_promo", "campaign_id", "advertiser_id", "source_id", "publisher_id", "source_id_promo", "publisher_id_promo", ] CTR_INPUTS = [ "ad_id", "source_id_promo", "document_id_promo", "publisher_id_promo", "advertiser_id", "campaign_id", ] EXCLUDE_COLUMNS = [ "publish_time", "publish_time_promo", "timestamp", "ad_id_clicked_sum", "source_id_promo_count", "source_id_promo_clicked_sum", "document_id_promo_clicked_sum", "publisher_id_promo_count", "publisher_id_promo_clicked_sum", "advertiser_id_count", "advertiser_id_clicked_sum", "campaign_id_count", "campaign_id_clicked_sum", "uuid", "day_event", ]
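The `*_clicked_sum_ctr` features in BASE_CONT_COLUMNS map one-to-one onto the id columns in CTR_INPUTS; a small check that makes the naming pattern explicit (the suffix is inferred from the lists above, not defined anywhere else in this file).

```python
# Illustrative only: the "<id column>_clicked_sum_ctr" suffix is inferred from the lists above.
derived_ctr_columns = [f"{col}_clicked_sum_ctr" for col in CTR_INPUTS]
assert set(derived_ctr_columns).issubset(BASE_CONT_COLUMNS)
print(derived_ctr_columns)
```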
TensorFlow/Detection/SSD/models/research/object_detection/matchers
matchers
argmax_matcher
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Argmax matcher implementation. This class takes a similarity matrix and matches columns to rows based on the maximum value per column. One can specify matched_thresholds and to prevent columns from matching to rows (generally resulting in a negative training example) and unmatched_theshold to ignore the match (generally resulting in neither a positive or negative training example). This matcher is used in Fast(er)-RCNN. Note: matchers are used in TargetAssigners. There is a create_target_assigner factory function for popular implementations. """ import tensorflow as tf from object_detection.core import matcher from object_detection.utils import shape_utils class ArgMaxMatcher(matcher.Matcher): """Matcher based on highest value. This class computes matches from a similarity matrix. Each column is matched to a single row. To support object detection target assignment this class enables setting both matched_threshold (upper threshold) and unmatched_threshold (lower thresholds) defining three categories of similarity which define whether examples are positive, negative, or ignored: (1) similarity >= matched_threshold: Highest similarity. Matched/Positive! (2) matched_threshold > similarity >= unmatched_threshold: Medium similarity. Depending on negatives_lower_than_unmatched, this is either Unmatched/Negative OR Ignore. (3) unmatched_threshold > similarity: Lowest similarity. Depending on flag negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore. For ignored matches this class sets the values in the Match object to -2. """ def __init__(self, matched_threshold, unmatched_threshold=None, negatives_lower_than_unmatched=True, force_match_for_each_row=False, use_matmul_gather=False): """Construct ArgMaxMatcher. Args: matched_threshold: Threshold for positive matches. Positive if sim >= matched_threshold, where sim is the maximum value of the similarity matrix for a given column. Set to None for no threshold. unmatched_threshold: Threshold for negative matches. Negative if sim < unmatched_threshold. Defaults to matched_threshold when set to None. negatives_lower_than_unmatched: Boolean which defaults to True. If True then negative matches are the ones below the unmatched_threshold, whereas ignored matches are in between the matched and umatched threshold. If False, then negative matches are in between the matched and unmatched threshold, and everything lower than unmatched is ignored. force_match_for_each_row: If True, ensures that each row is matched to at least one column (which is not guaranteed otherwise if the matched_threshold is high). Defaults to False. See argmax_matcher_test.testMatcherForceMatch() for an example. use_matmul_gather: Force constructed match objects to use matrix multiplication based gather instead of standard tf.gather. (Default: False). 
Raises: ValueError: if unmatched_threshold is set but matched_threshold is not set or if unmatched_threshold > matched_threshold. """ super(ArgMaxMatcher, self).__init__(use_matmul_gather=use_matmul_gather) if (matched_threshold is None) and (unmatched_threshold is not None): raise ValueError('Need to also define matched_threshold when' 'unmatched_threshold is defined') self._matched_threshold = matched_threshold if unmatched_threshold is None: self._unmatched_threshold = matched_threshold else: if unmatched_threshold > matched_threshold: raise ValueError('unmatched_threshold needs to be smaller or equal' 'to matched_threshold') self._unmatched_threshold = unmatched_threshold if not negatives_lower_than_unmatched: if self._unmatched_threshold == self._matched_threshold: raise ValueError('When negatives are in between matched and ' 'unmatched thresholds, these cannot be of equal ' 'value. matched: {}, unmatched: {}'.format( self._matched_threshold, self._unmatched_threshold)) self._force_match_for_each_row = force_match_for_each_row self._negatives_lower_than_unmatched = negatives_lower_than_unmatched def _match(self, similarity_matrix, valid_rows): """Tries to match each column of the similarity matrix to a row. Args: similarity_matrix: tensor of shape [N, M] representing any similarity metric. valid_rows: a boolean tensor of shape [N] indicating valid rows. Returns: Match object with corresponding matches for each of M columns. """ def _match_when_rows_are_empty(): """Performs matching when the rows of similarity matrix are empty. When the rows are empty, all detections are false positives. So we return a tensor of -1's to indicate that the columns do not match to any rows. Returns: matches: int32 tensor indicating the row each column matches to. """ similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape( similarity_matrix) return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32) def _match_when_rows_are_non_empty(): """Performs matching when the rows of similarity matrix are non empty. Returns: matches: int32 tensor indicating the row each column matches to. 
""" # Matches for each column matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32) # Deal with matched and unmatched threshold if self._matched_threshold is not None: # Get logical indices of ignored and unmatched columns as tf.int64 matched_vals = tf.reduce_max(similarity_matrix, 0) below_unmatched_threshold = tf.greater(self._unmatched_threshold, matched_vals) between_thresholds = tf.logical_and( tf.greater_equal(matched_vals, self._unmatched_threshold), tf.greater(self._matched_threshold, matched_vals)) if self._negatives_lower_than_unmatched: matches = self._set_values_using_indicator(matches, below_unmatched_threshold, -1) matches = self._set_values_using_indicator(matches, between_thresholds, -2) else: matches = self._set_values_using_indicator(matches, below_unmatched_threshold, -2) matches = self._set_values_using_indicator(matches, between_thresholds, -1) if self._force_match_for_each_row: similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape( similarity_matrix) force_match_column_ids = tf.argmax(similarity_matrix, 1, output_type=tf.int32) force_match_column_indicators = ( tf.one_hot( force_match_column_ids, depth=similarity_matrix_shape[1]) * tf.cast(tf.expand_dims(valid_rows, axis=-1), dtype=tf.float32)) force_match_row_ids = tf.argmax(force_match_column_indicators, 0, output_type=tf.int32) force_match_column_mask = tf.cast( tf.reduce_max(force_match_column_indicators, 0), tf.bool) final_matches = tf.where(force_match_column_mask, force_match_row_ids, matches) return final_matches else: return matches if similarity_matrix.shape.is_fully_defined(): if similarity_matrix.shape[0].value == 0: return _match_when_rows_are_empty() else: return _match_when_rows_are_non_empty() else: return tf.cond( tf.greater(tf.shape(similarity_matrix)[0], 0), _match_when_rows_are_non_empty, _match_when_rows_are_empty) def _set_values_using_indicator(self, x, indicator, val): """Set the indicated fields of x to val. Args: x: tensor. indicator: boolean with same shape as x. val: scalar with value to set. Returns: modified tensor. """ indicator = tf.cast(indicator, x.dtype) return tf.add(tf.multiply(x, 1 - indicator), val * indicator)
PaddlePaddle/LanguageModeling/BERT/data
data
TextSharding
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from itertools import islice import multiprocessing import statistics class Sharding: def __init__(self, input_files, output_name_prefix, n_training_shards, n_test_shards, fraction_test_set): assert len(input_files ) > 0, 'The input file list must contain at least one file.' assert n_training_shards > 0, 'There must be at least one output shard.' assert n_test_shards > 0, 'There must be at least one output shard.' self.n_training_shards = n_training_shards self.n_test_shards = n_test_shards self.fraction_test_set = fraction_test_set self.input_files = input_files self.output_name_prefix = output_name_prefix self.output_training_identifier = '_training' self.output_test_identifier = '_test' self.output_file_extension = '.txt' self.articles = {} # key: integer identifier, value: list of articles self.sentences = { } # key: integer identifier, value: list of sentences self.output_training_files = { } # key: filename, value: list of articles to go into file self.output_test_files = { } # key: filename, value: list of articles to go into file self.init_output_files() # Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines) def load_articles(self): print('Start: Loading Articles') global_article_count = 0 for input_file in self.input_files: print('input file:', input_file) with open(input_file, mode='r', newline='\n') as f: for _, line in enumerate(f): if line.strip(): self.articles[global_article_count] = line.rstrip() global_article_count += 1 print('End: Loading Articles: There are', len(self.articles), 'articles.') def segment_articles_into_sentences(self, segmenter): print('Start: Sentence Segmentation') if len(self.articles) == 0: self.load_articles() assert len( self.articles ) != 0, 'Please check that input files are present and contain data.' 
use_multiprocessing = 'serial' def chunks(data, size=len(self.articles)): it = iter(data) for _ in range(0, len(data), size): yield {k: data[k] for k in islice(it, size)} if use_multiprocessing == 'manager': manager = multiprocessing.Manager() return_dict = manager.dict() jobs = [] n_processes = 7 # in addition to the main process, total = n_proc+1 def work(articles, return_dict): sentences = {} for i, article in enumerate(articles): sentences[i] = segmenter.segment_string(articles[article]) if i % 5000 == 0: print('Segmenting article', i) return_dict.update(sentences) for item in chunks(self.articles, len(self.articles)): p = multiprocessing.Process( target=work, args=(item, return_dict)) # Busy wait while len(jobs) >= n_processes: pass jobs.append(p) p.start() for proc in jobs: proc.join() elif use_multiprocessing == 'queue': multiprocessing.Queue() jobs = [] for item in chunks(self.articles, len(self.articles)): pass else: # serial option for i, article in enumerate(self.articles): self.sentences[i] = segmenter.segment_string(self.articles[ article]) if i % 5000 == 0: print('Segmenting article', i) print('End: Sentence Segmentation') def init_output_files(self): print('Start: Init Output Files') assert len( self.output_training_files ) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.' assert len( self.output_test_files ) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.' for i in range(self.n_training_shards): name = self.output_name_prefix + self.output_training_identifier + '_' + str( i) + self.output_file_extension self.output_training_files[name] = [] for i in range(self.n_test_shards): name = self.output_name_prefix + self.output_test_identifier + '_' + str( i) + self.output_file_extension self.output_test_files[name] = [] print('End: Init Output Files') def get_sentences_per_shard(self, shard): result = 0 for article_id in shard: result += len(self.sentences[article_id]) return result def distribute_articles_over_shards(self): print('Start: Distribute Articles Over Shards') assert len( self.articles ) >= self.n_training_shards + self.n_test_shards, 'There are fewer articles than shards. Please add more data or reduce the number of shards requested.' 
# Create dictionary with - key: sentence count per article, value: article id number sentence_counts = defaultdict(lambda: []) max_sentences = 0 total_sentences = 0 for article_id in self.sentences: current_length = len(self.sentences[article_id]) sentence_counts[current_length].append(article_id) max_sentences = max(max_sentences, current_length) total_sentences += current_length n_sentences_assigned_to_training = int( (1 - self.fraction_test_set) * total_sentences) nominal_sentences_per_training_shard = n_sentences_assigned_to_training // self.n_training_shards nominal_sentences_per_test_shard = ( total_sentences - n_sentences_assigned_to_training ) // self.n_test_shards consumed_article_set = set({}) unused_article_set = set(self.articles.keys()) # Make first pass and add one article worth of lines per file for file in self.output_training_files: current_article_id = sentence_counts[max_sentences][-1] sentence_counts[max_sentences].pop(-1) self.output_training_files[file].append(current_article_id) consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) # Maintain the max sentence count while len(sentence_counts[ max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 if len(self.sentences[current_article_id] ) > nominal_sentences_per_training_shard: nominal_sentences_per_training_shard = len(self.sentences[ current_article_id]) print( 'Warning: A single article contains more than the nominal number of sentences per training shard.' ) for file in self.output_test_files: current_article_id = sentence_counts[max_sentences][-1] sentence_counts[max_sentences].pop(-1) self.output_test_files[file].append(current_article_id) consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) # Maintain the max sentence count while len(sentence_counts[ max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 if len(self.sentences[ current_article_id]) > nominal_sentences_per_test_shard: nominal_sentences_per_test_shard = len(self.sentences[ current_article_id]) print( 'Warning: A single article contains more than the nominal number of sentences per test shard.' 
) training_counts = [] test_counts = [] for shard in self.output_training_files: training_counts.append( self.get_sentences_per_shard(self.output_training_files[ shard])) for shard in self.output_test_files: test_counts.append( self.get_sentences_per_shard(self.output_test_files[shard])) training_median = statistics.median(training_counts) test_median = statistics.median(test_counts) # Make subsequent passes over files to find articles to add without going over limit history_remaining = [] n_history_remaining = 4 while len(consumed_article_set) < len(self.articles): for fidx, file in enumerate(self.output_training_files): nominal_next_article_size = min( nominal_sentences_per_training_shard - training_counts[fidx], max_sentences) # Maintain the max sentence count while len(sentence_counts[ max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 while len(sentence_counts[nominal_next_article_size] ) == 0 and nominal_next_article_size > 0: nominal_next_article_size -= 1 if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or training_counts[ fidx] > training_median: continue # skip adding to this file, will come back later if no file can accept unused articles current_article_id = sentence_counts[ nominal_next_article_size][-1] sentence_counts[nominal_next_article_size].pop(-1) self.output_training_files[file].append(current_article_id) consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) for fidx, file in enumerate(self.output_test_files): nominal_next_article_size = min( nominal_sentences_per_test_shard - test_counts[fidx], max_sentences) # Maintain the max sentence count while len(sentence_counts[ max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 while len(sentence_counts[nominal_next_article_size] ) == 0 and nominal_next_article_size > 0: nominal_next_article_size -= 1 if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or test_counts[ fidx] > test_median: continue # skip adding to this file, will come back later if no file can accept unused articles current_article_id = sentence_counts[ nominal_next_article_size][-1] sentence_counts[nominal_next_article_size].pop(-1) self.output_test_files[file].append(current_article_id) consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) # If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed if len(history_remaining) == n_history_remaining: history_remaining.pop(0) history_remaining.append(len(unused_article_set)) history_same = True for i in range(1, len(history_remaining)): history_same = history_same and ( history_remaining[i - 1] == history_remaining[i]) if history_same: nominal_sentences_per_training_shard += 1 # nominal_sentences_per_test_shard += 1 training_counts = [] test_counts = [] for shard in self.output_training_files: training_counts.append( self.get_sentences_per_shard(self.output_training_files[ shard])) for shard in self.output_test_files: test_counts.append( self.get_sentences_per_shard(self.output_test_files[ shard])) training_median = statistics.median(training_counts) test_median = statistics.median(test_counts) print('Distributing data over shards:', len(unused_article_set), 'articles remaining.') if len(unused_article_set) != 0: print('Warning: Some articles did not make it into output files.') for shard in self.output_training_files: print('Training shard:', self.get_sentences_per_shard(self.output_training_files[ 
shard])) for shard in self.output_test_files: print('Test shard:', self.get_sentences_per_shard(self.output_test_files[shard])) print('End: Distribute Articles Over Shards') def write_shards_to_disk(self): print('Start: Write Shards to Disk') for shard in self.output_training_files: self.write_single_shard(shard, self.output_training_files[shard]) for shard in self.output_test_files: self.write_single_shard(shard, self.output_test_files[shard]) print('End: Write Shards to Disk') def write_single_shard(self, shard_name, shard): with open(shard_name, mode='w', newline='\n') as f: for article_id in shard: for line in self.sentences[article_id]: f.write(line + '\n') f.write('\n') # Line break between articles import nltk nltk.download('punkt') class NLTKSegmenter: def __init__(self): pass def segment_string(self, article): return nltk.tokenize.sent_tokenize(article)
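A hedged end-to-end sketch of how the classes above are wired together; the import path, file paths and shard counts are placeholders, and in the repository this driver logic lives in a separate preprocessing script.

```python
# Placeholder driver; paths, prefix and shard counts are assumptions.
from data.TextSharding import Sharding, NLTKSegmenter  # import path is an assumption

segmenter = NLTKSegmenter()
sharding = Sharding(
    input_files=['formatted/wiki_one_article_per_line.txt'],  # placeholder
    output_name_prefix='sharded/wiki',                        # placeholder
    n_training_shards=256,
    n_test_shards=256,
    fraction_test_set=0.1,
)
sharding.load_articles()
sharding.segment_articles_into_sentences(segmenter)
sharding.distribute_articles_over_shards()
sharding.write_shards_to_disk()
```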
PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text
text
symbols
""" from https://github.com/keithito/tacotron """ ''' Defines the set of symbols used in text input to the model. The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. ''' from tacotron2.text import cmudict _pad = '_' _punctuation = '!\'(),.:;? ' _special = '-' _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' # Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters): _arpabet = ['@' + s for s in cmudict.valid_symbols] # Export all symbols: symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
PyTorch/Recommendation/DLRM
DLRM
requirements
git+https://github.com/NVIDIA/dllogger#egg=dllogger
pandas
absl-py>=0.7.0
TensorFlow/Detection/SSD/models/research/slim/nets
nets
mobilenet_v1
# Mobilenet_v2 For Mobilenet V2 see this file [mobilenet/README.md] # MobileNet_v1 [MobileNets](https://arxiv.org/abs/1704.04861) are small, low-latency, low-power models parameterized to meet the resource constraints of a variety of use cases. They can be built upon for classification, detection, embeddings and segmentation similar to how other popular large scale models, such as Inception, are used. MobileNets can be run efficiently on mobile devices with [TensorFlow Mobile](https://www.tensorflow.org/mobile/). MobileNets trade off between latency, size and accuracy while comparing favorably with popular models from the literature. ![alt text](mobilenet_v1.png "MobileNet Graph") # Pre-trained Models Choose the right MobileNet model to fit your latency and size budget. The size of the network in memory and on disk is proportional to the number of parameters. The latency and power usage of the network scales with the number of Multiply-Accumulates (MACs) which measures the number of fused Multiplication and Addition operations. These MobileNet models have been trained on the [ILSVRC-2012-CLS](http://www.image-net.org/challenges/LSVRC/2012/) image classification dataset. Accuracies were computed by evaluating using a single image crop. Model | Million MACs | Million Parameters | Top-1 Accuracy| Top-5 Accuracy | :----:|:------------:|:----------:|:-------:|:-------:| [MobileNet_v1_1.0_224](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz)|569|4.24|70.9|89.9| [MobileNet_v1_1.0_192](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_192.tgz)|418|4.24|70.0|89.2| [MobileNet_v1_1.0_160](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_160.tgz)|291|4.24|68.0|87.7| [MobileNet_v1_1.0_128](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_128.tgz)|186|4.24|65.2|85.8| [MobileNet_v1_0.75_224](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.75_224.tgz)|317|2.59|68.4|88.2| [MobileNet_v1_0.75_192](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.75_192.tgz)|233|2.59|67.2|87.3| [MobileNet_v1_0.75_160](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.75_160.tgz)|162|2.59|65.3|86.0| [MobileNet_v1_0.75_128](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.75_128.tgz)|104|2.59|62.1|83.9| [MobileNet_v1_0.50_224](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.5_224.tgz)|150|1.34|63.3|84.9| [MobileNet_v1_0.50_192](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.5_192.tgz)|110|1.34|61.7|83.6| [MobileNet_v1_0.50_160](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.5_160.tgz)|77|1.34|59.1|81.9| [MobileNet_v1_0.50_128](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.5_128.tgz)|49|1.34|56.3|79.4| [MobileNet_v1_0.25_224](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_224.tgz)|41|0.47|49.8|74.2| [MobileNet_v1_0.25_192](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_192.tgz)|34|0.47|47.7|72.3| [MobileNet_v1_0.25_160](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_160.tgz)|21|0.47|45.5|70.3| [MobileNet_v1_0.25_128](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_128.tgz)|14|0.47|41.5|66.3| 
[MobileNet_v1_1.0_224_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz)|569|4.24|70.1|88.9| [MobileNet_v1_1.0_192_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_192_quant.tgz)|418|4.24|69.2|88.3| [MobileNet_v1_1.0_160_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_160_quant.tgz)|291|4.24|67.2|86.7| [MobileNet_v1_1.0_128_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_128_quant.tgz)|186|4.24|63.4|84.2| [MobileNet_v1_0.75_224_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.75_224_quant.tgz)|317|2.59|66.8|87.0| [MobileNet_v1_0.75_192_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.75_192_quant.tgz)|233|2.59|66.1|86.4| [MobileNet_v1_0.75_160_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.75_160_quant.tgz)|162|2.59|62.3|83.8| [MobileNet_v1_0.75_128_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.75_128_quant.tgz)|104|2.59|55.8|78.8| [MobileNet_v1_0.50_224_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.5_224_quant.tgz)|150|1.34|60.7|83.2| [MobileNet_v1_0.50_192_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.5_192_quant.tgz)|110|1.34|60.0|82.2| [MobileNet_v1_0.50_160_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.5_160_quant.tgz)|77|1.34|57.7|80.4| [MobileNet_v1_0.50_128_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.5_128_quant.tgz)|49|1.34|54.5|77.7| [MobileNet_v1_0.25_224_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_224_quant.tgz)|41|0.47|48.0|72.8| [MobileNet_v1_0.25_192_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_192_quant.tgz)|34|0.47|46.0|71.2| [MobileNet_v1_0.25_160_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_160_quant.tgz)|21|0.47|43.4|68.5| [MobileNet_v1_0.25_128_quant](http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_0.25_128_quant.tgz)|14|0.47|39.5|64.4| Revisions to models: * July 12, 2018: Update to TFLite models that fixes an accuracy issue resolved by making conversion support weights with narrow_range. We now report validation on the actual TensorFlow Lite model rather than the emulated quantization number of TensorFlow. * August 2, 2018: Update to TFLite models that fixes an accuracy issue resolved by making sure the numerics of quantization match TF quantized training accurately. The linked model tar files contain the following: * Trained model checkpoints * Eval graph text protos (to be easily viewed) * Frozen trained models * Info file containing input and output information * Converted [TensorFlow Lite](https://www.tensorflow.org/mobile/tflite/) flatbuffer model Note that quantized model GraphDefs are still float models, they just have FakeQuantization operation embedded to simulate quantization. These are converted by [TensorFlow Lite](https://www.tensorflow.org/mobile/tflite/) to be fully quantized. The final effect of quantization can be seen by comparing the frozen fake quantized graph to the size of the TFLite flatbuffer, i.e. The TFLite flatbuffer is about 1/4 the size. 
For more information on the quantization techniques used here, see [here](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/quantize). Here is an example of how to download the MobileNet_v1_1.0_224 checkpoint: ```shell $ CHECKPOINT_DIR=/tmp/checkpoints $ mkdir ${CHECKPOINT_DIR} $ wget http://download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_1.0_224.tgz $ tar -xvf mobilenet_v1_1.0_224.tgz $ mv mobilenet_v1_1.0_224.ckpt.* ${CHECKPOINT_DIR} ``` # MobileNet V1 scripts This package contains scripts for training floating point and eight-bit fixed point TensorFlow models. Quantization tools used are described in [contrib/quantize](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/quantize). Conversion to fully quantized models for mobile can be done through [TensorFlow Lite](https://www.tensorflow.org/mobile/tflite/). ## Usage ### Build for GPU ``` $ bazel build -c opt --config=cuda mobilenet_v1_{eval,train} ``` ### Running #### Float Training and Eval Train: ``` $ ./bazel-bin/mobilenet_v1_train --dataset_dir "path/to/dataset" --checkpoint_dir "path/to/checkpoints" ``` Eval: ``` $ ./bazel-bin/mobilenet_v1_eval --dataset_dir "path/to/dataset" --checkpoint_dir "path/to/checkpoints" ``` #### Quantized Training and Eval Train from preexisting float checkpoint: ``` $ ./bazel-bin/mobilenet_v1_train --dataset_dir "path/to/dataset" --checkpoint_dir "path/to/checkpoints" \ --quantize=True --fine_tune_checkpoint=float/checkpoint/path ``` Train from scratch: ``` $ ./bazel-bin/mobilenet_v1_train --dataset_dir "path/to/dataset" --checkpoint_dir "path/to/checkpoints" --quantize=True ``` Eval: ``` $ ./bazel-bin/mobilenet_v1_eval --dataset_dir "path/to/dataset" --checkpoint_dir "path/to/checkpoints" --quantize=True ``` The resulting float and quantized models can be run on-device via [TensorFlow Lite](https://www.tensorflow.org/mobile/tflite/).
PyTorch/Recommendation/DLRM
DLRM
setup
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from setuptools import setup, find_packages from torch.utils.cpp_extension import BuildExtension, CUDAExtension abspath = os.path.dirname(os.path.realpath(__file__)) print(find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"])) setup(name="dlrm", package_dir={'dlrm': 'dlrm'}, version="1.0.0", description="Reimplementation of Facebook's DLRM", packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), zip_safe=False, ext_modules=[ CUDAExtension(name="dlrm.cuda_ext.fused_embedding", sources=[ os.path.join(abspath, "dlrm/cuda_src/pytorch_embedding_ops.cpp"), os.path.join(abspath, "dlrm/cuda_src/gather_gpu_fused_pytorch_impl.cu") ], extra_compile_args={ 'cxx': [], 'nvcc': ["-arch=sm_70", '-gencode', 'arch=compute_80,code=sm_80'] }), CUDAExtension(name="dlrm.cuda_ext.interaction_volta", sources=[ os.path.join(abspath, "dlrm/cuda_src/dot_based_interact_volta/pytorch_ops.cpp"), os.path.join(abspath, "dlrm/cuda_src/dot_based_interact_volta/dot_based_interact_pytorch_types.cu") ], extra_compile_args={ 'cxx': [], 'nvcc': [ '-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__', '-gencode', 'arch=compute_70,code=sm_70'] }), CUDAExtension(name="dlrm.cuda_ext.interaction_ampere", sources=[ os.path.join(abspath, "dlrm/cuda_src/dot_based_interact_ampere/pytorch_ops.cpp"), os.path.join(abspath, "dlrm/cuda_src/dot_based_interact_ampere/dot_based_interact_pytorch_types.cu") ], extra_compile_args={ 'cxx': [], 'nvcc': [ '-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__', '-gencode', 'arch=compute_80,code=sm_80'] }), CUDAExtension(name="dlrm.cuda_ext.sparse_gather", sources=[ os.path.join(abspath, "dlrm/cuda_src/sparse_gather/sparse_pytorch_ops.cpp"), os.path.join(abspath, "dlrm/cuda_src/sparse_gather/gather_gpu.cu") ], extra_compile_args={ 'cxx': [], 'nvcc': ["-arch=sm_70", '-gencode', 'arch=compute_80,code=sm_80'] }) ], cmdclass={"build_ext": BuildExtension})
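After building the package (for example with `pip install -e .`), the four CUDA extensions should be importable under the names given to CUDAExtension above. A minimal import smoke test, assuming a CUDA-enabled PyTorch build for one of the listed compute capabilities; the module paths are taken from the extension names, everything else is an assumption.

```python
# Import smoke test; extension module paths come from the CUDAExtension names above.
import importlib

for ext in ("dlrm.cuda_ext.fused_embedding",
            "dlrm.cuda_ext.interaction_volta",
            "dlrm.cuda_ext.interaction_ampere",
            "dlrm.cuda_ext.sparse_gather"):
    importlib.import_module(ext)
    print(f"loaded {ext}")
```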
PyTorch/Detection/Efficientdet/effdet/layers
layers
mixed_conv2d
""" PyTorch Mixed Convolution Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) Hacked together by / Copyright 2020 Ross Wightman """ # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2019-2022 Ross Wightman # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn as nn from .conv2d_same import create_conv2d_pad def _split_channels(num_chan, num_groups): split = [num_chan // num_groups for _ in range(num_groups)] split[0] += num_chan - sum(split) return split class MixedConv2d(nn.ModuleDict): """ Mixed Grouped Convolution Based on MDConv and GroupedConv in MixNet impl: https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py """ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding='', dilation=1, depthwise=False, **kwargs): super(MixedConv2d, self).__init__() kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] num_groups = len(kernel_size) in_splits = _split_channels(in_channels, num_groups) out_splits = _split_channels(out_channels, num_groups) self.in_channels = sum(in_splits) self.out_channels = sum(out_splits) for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): conv_groups = out_ch if depthwise else 1 # use add_module to keep key space clean self.add_module( str(idx), create_conv2d_pad( in_ch, out_ch, k, stride=stride, padding=padding, dilation=dilation, groups=conv_groups, **kwargs) ) self.splits = in_splits def forward(self, x): x_split = torch.split(x, self.splits, 1) x_out = [c(x_split[i]) for i, c in enumerate(self.values())] x = torch.cat(x_out, 1) return x
PyTorch/SpeechRecognition/QuartzNet/common
common
dataset
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from pathlib import Path import numpy as np import torch from torch.utils.data import Dataset, DataLoader from torch.utils.data.distributed import DistributedSampler from .audio import (audio_from_file, AudioSegment, GainPerturbation, ShiftPerturbation, SpeedPerturbation) from .text import _clean_text, punctuation_map def normalize_string(s, labels, punct_map): """Normalizes string. Example: 'call me at 8:00 pm!' -> 'call me at eight zero pm' """ labels = set(labels) try: text = _clean_text(s, ["english_cleaners"], punct_map).strip() return ''.join([tok for tok in text if all(t in labels for t in tok)]) except: print(f"WARNING: Normalizing failed: {s}") return None class FilelistDataset(Dataset): def __init__(self, filelist_fpath): self.samples = [line.strip() for line in open(filelist_fpath, 'r')] def __len__(self): return len(self.samples) def __getitem__(self, index): audio, audio_len = audio_from_file(self.samples[index]) return (audio.squeeze(0), audio_len, torch.LongTensor([0]), torch.LongTensor([0])) class SingleAudioDataset(FilelistDataset): def __init__(self, audio_fpath): self.samples = [audio_fpath] class AudioDataset(Dataset): def __init__(self, data_dir, manifest_fpaths, labels, sample_rate=16000, min_duration=0.1, max_duration=float("inf"), pad_to_max_duration=False, max_utts=0, normalize_transcripts=True, sort_by_duration=False, trim_silence=False, speed_perturbation=None, gain_perturbation=None, shift_perturbation=None, ignore_offline_speed_perturbation=False): """Loads audio, transcript and durations listed in a .json file. Args: data_dir: absolute path to dataset folder manifest_filepath: relative path from dataset folder to manifest json as described above. Can be coma-separated paths. 
labels (str): all possible output symbols min_duration (int): skip audio shorter than threshold max_duration (int): skip audio longer than threshold pad_to_max_duration (bool): pad all sequences to max_duration max_utts (int): limit number of utterances normalize_transcripts (bool): normalize transcript text sort_by_duration (bool): sort sequences by increasing duration trim_silence (bool): trim leading and trailing silence from audio ignore_offline_speed_perturbation (bool): use precomputed speed perturbation Returns: tuple of Tensors """ self.data_dir = data_dir self.labels = labels self.labels_map = dict([(labels[i], i) for i in range(len(labels))]) self.punctuation_map = punctuation_map(labels) self.blank_index = len(labels) self.pad_to_max_duration = pad_to_max_duration self.sort_by_duration = sort_by_duration self.max_utts = max_utts self.normalize_transcripts = normalize_transcripts self.ignore_offline_speed_perturbation = ignore_offline_speed_perturbation self.min_duration = min_duration self.max_duration = max_duration self.trim_silence = trim_silence self.sample_rate = sample_rate perturbations = [] if speed_perturbation is not None: perturbations.append(SpeedPerturbation(**speed_perturbation)) if gain_perturbation is not None: perturbations.append(GainPerturbation(**gain_perturbation)) if shift_perturbation is not None: perturbations.append(ShiftPerturbation(**shift_perturbation)) self.perturbations = perturbations self.max_duration = max_duration self.samples = [] self.duration = 0.0 self.duration_filtered = 0.0 for fpath in manifest_fpaths: self._load_json_manifest(fpath) if sort_by_duration: self.samples = sorted(self.samples, key=lambda s: s['duration']) def __getitem__(self, index): s = self.samples[index] rn_indx = np.random.randint(len(s['audio_filepath'])) duration = s['audio_duration'][rn_indx] if 'audio_duration' in s else 0 offset = s.get('offset', 0) segment = AudioSegment( s['audio_filepath'][rn_indx], target_sr=self.sample_rate, offset=offset, duration=duration, trim=self.trim_silence) for p in self.perturbations: p.maybe_apply(segment, self.sample_rate) segment = torch.FloatTensor(segment.samples) return (segment, torch.tensor(segment.shape[0]).int(), torch.tensor(s["transcript"]), torch.tensor(len(s["transcript"])).int()) def __len__(self): return len(self.samples) def _load_json_manifest(self, fpath): for s in json.load(open(fpath, "r", encoding="utf-8")): if self.pad_to_max_duration and not self.ignore_offline_speed_perturbation: # require all perturbed samples to be < self.max_duration s_max_duration = max(f['duration'] for f in s['files']) else: # otherwise we allow perturbances to be > self.max_duration s_max_duration = s['original_duration'] s['duration'] = s.pop('original_duration') if not (self.min_duration <= s_max_duration <= self.max_duration): self.duration_filtered += s['duration'] continue # Prune and normalize according to transcript tr = (s.get('transcript', None) or self.load_transcript(s['text_filepath'])) if not isinstance(tr, str): print(f'WARNING: Skipped sample (transcript not a str): {tr}.') self.duration_filtered += s['duration'] continue if self.normalize_transcripts: tr = normalize_string(tr, self.labels, self.punctuation_map) s["transcript"] = self.to_vocab_inds(tr) files = s.pop('files') if self.ignore_offline_speed_perturbation: files = [f for f in files if f['speed'] == 1.0] s['audio_duration'] = [f['duration'] for f in files] s['audio_filepath'] = [str(Path(self.data_dir, f['fname'])) for f in files] self.samples.append(s) 
self.duration += s['duration'] if self.max_utts > 0 and len(self.samples) >= self.max_utts: print(f'Reached max_utts={self.max_utts}. Finished parsing {fpath}.') break def load_transcript(self, transcript_path): with open(transcript_path, 'r', encoding="utf-8") as transcript_file: transcript = transcript_file.read().replace('\n', '') return transcript def to_vocab_inds(self, transcript): chars = [self.labels_map.get(x, self.blank_index) for x in list(transcript)] transcript = list(filter(lambda x: x != self.blank_index, chars)) return transcript def collate_fn(batch): bs = len(batch) max_len = lambda l, idx: max(el[idx].size(0) for el in l) audio = torch.zeros(bs, max_len(batch, 0)) audio_lens = torch.zeros(bs, dtype=torch.int32) transcript = torch.zeros(bs, max_len(batch, 2)) transcript_lens = torch.zeros(bs, dtype=torch.int32) for i, sample in enumerate(batch): audio[i].narrow(0, 0, sample[0].size(0)).copy_(sample[0]) audio_lens[i] = sample[1] transcript[i].narrow(0, 0, sample[2].size(0)).copy_(sample[2]) transcript_lens[i] = sample[3] return audio, audio_lens, transcript, transcript_lens def get_data_loader(dataset, batch_size, multi_gpu=True, shuffle=True, drop_last=True, num_workers=4): kw = {'dataset': dataset, 'collate_fn': collate_fn, 'num_workers': num_workers, 'pin_memory': True} if multi_gpu: loader_shuffle = False sampler = DistributedSampler(dataset, shuffle=shuffle) else: loader_shuffle = shuffle sampler = None return DataLoader(batch_size=batch_size, drop_last=drop_last, sampler=sampler, shuffle=loader_shuffle, **kw)
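A synthetic two-sample batch makes the zero-padding done by collate_fn concrete; the tensors are random stand-ins shaped like AudioDataset.__getitem__ output, and the import path is an assumption.

```python
import torch
from common.dataset import collate_fn  # import path is an assumption

# Two fake samples shaped like AudioDataset.__getitem__ output:
# (audio, audio_len, transcript, transcript_len)
batch = [
    (torch.randn(16000), torch.tensor(16000).int(),
     torch.tensor([5, 2, 9]), torch.tensor(3).int()),
    (torch.randn(12000), torch.tensor(12000).int(),
     torch.tensor([7, 1]), torch.tensor(2).int()),
]

audio, audio_lens, transcript, transcript_lens = collate_fn(batch)
print(audio.shape)       # torch.Size([2, 16000]) - zero padded to the longest audio
print(transcript.shape)  # torch.Size([2, 3])     - zero padded to the longest transcript
print(audio_lens, transcript_lens)
```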
TensorFlow/Translation/GNMT
GNMT
model
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Basic sequence-to-sequence model with dynamic RNN support.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import os import tensorflow as tf import numpy as np from tensorflow.python.framework import function from tensorflow.python.ops import math_ops import attention_wrapper import model_helper import beam_search_decoder from utils import iterator_utils from utils import math_utils from utils import misc_utils as utils from utils import vocab_utils utils.check_tensorflow_version() __all__ = ["BaseModel"] def create_attention_mechanism( num_units, memory, source_sequence_length, dtype=None): """Create attention mechanism based on the attention_option.""" # Mechanism attention_mechanism = attention_wrapper.BahdanauAttention( num_units, memory, memory_sequence_length=tf.to_int64(source_sequence_length), normalize=True, dtype=dtype) return attention_mechanism class BaseModel(object): """Sequence-to-sequence base class. """ def __init__(self, hparams, mode, features, scope=None, extra_args=None): """Create the model. Args: hparams: Hyperparameter configurations. mode: TRAIN | EVAL | INFER features: a dict of input features. scope: scope of the model. extra_args: model_helper.ExtraArgs, for passing customizable functions. 
""" self.hparams = hparams # Set params self._set_params_initializer(hparams, mode, features, scope, extra_args) # Train graph res = self.build_graph(hparams, scope=scope) self._set_train_or_infer(res, hparams) def _set_params_initializer(self, hparams, mode, features, scope, extra_args=None): """Set various params for self and initialize.""" self.mode = mode self.src_vocab_size = hparams.src_vocab_size self.tgt_vocab_size = hparams.tgt_vocab_size self.features = features self.time_major = hparams.time_major if hparams.use_char_encode: assert (not self.time_major), ("Can't use time major for" " char-level inputs.") self.dtype = tf.float16 if hparams.use_fp16 else tf.float32 # extra_args: to make it flexible for adding external customizable code self.single_cell_fn = None if extra_args: self.single_cell_fn = extra_args.single_cell_fn # Set num units self.num_units = hparams.num_units # Set num layers self.num_encoder_layers = hparams.num_encoder_layers self.num_decoder_layers = hparams.num_decoder_layers assert self.num_encoder_layers assert self.num_decoder_layers # Set num residual layers if hasattr(hparams, "num_residual_layers"): # compatible common_test_utils self.num_encoder_residual_layers = hparams.num_residual_layers self.num_decoder_residual_layers = hparams.num_residual_layers else: self.num_encoder_residual_layers = hparams.num_encoder_residual_layers self.num_decoder_residual_layers = hparams.num_decoder_residual_layers # Batch size self.batch_size = tf.size(self.features["source_sequence_length"]) # Global step global_step = tf.train.get_global_step() if global_step is not None: utils.print_out("global_step already created!") self.global_step = tf.train.get_or_create_global_step() utils.print_out("model.global_step.name: %s" % self.global_step.name) # Initializer self.random_seed = hparams.random_seed initializer = model_helper.get_initializer( hparams.init_op, self.random_seed, hparams.init_weight) tf.get_variable_scope().set_initializer(initializer) # Embeddings self.encoder_emb_lookup_fn = tf.nn.embedding_lookup self.init_embeddings(hparams, scope) def _set_train_or_infer(self, res, hparams): """Set up training.""" loss = res[1] if self.mode == tf.contrib.learn.ModeKeys.TRAIN: self.train_loss = loss self.word_count = tf.reduce_sum( self.features["source_sequence_length"]) + tf.reduce_sum( self.features["target_sequence_length"]) elif self.mode == tf.contrib.learn.ModeKeys.EVAL: self.eval_loss = loss elif self.mode == tf.contrib.learn.ModeKeys.INFER: self.infer_logits = res[0] self.infer_loss = loss self.sample_id = res[2] if self.mode != tf.contrib.learn.ModeKeys.INFER: ## Count the number of predicted words for compute ppl. self.predict_count = tf.reduce_sum( self.features["target_sequence_length"]) # Gradients and SGD update operation for training the model. # Arrange for the embedding vars to appear at the beginning. # Only build bprop if running on GPU and using dist_strategy, in which # case learning rate, grads and train_op are created in estimator model # function. 
with tf.name_scope("learning_rate"): self.learning_rate = tf.constant(hparams.learning_rate) # warm-up self.learning_rate = self._get_learning_rate_warmup(hparams) # decay self.learning_rate = self._get_learning_rate_decay(hparams) if (hparams.use_dist_strategy and self.mode == tf.contrib.learn.ModeKeys.TRAIN): # Gradients params = tf.trainable_variables() # Print trainable variables utils.print_out("# Trainable variables") utils.print_out( "Format: <name>, <shape>, <dtype>, <(soft) device placement>") for param in params: utils.print_out( " %s, %s, %s, %s" % (param.name, str(param.get_shape()), param.dtype.name, param.op.device)) utils.print_out("Total params size: %.2f GB" % (4. * np.sum([ p.get_shape().num_elements() for p in params if p.shape.is_fully_defined() ]) / 2**30)) # Optimizer if hparams.optimizer == "sgd": opt = tf.train.GradientDescentOptimizer(self.learning_rate) elif hparams.optimizer == "adam": opt = tf.train.AdamOptimizer(self.learning_rate) else: raise ValueError("Unknown optimizer type %s" % hparams.optimizer) assert opt is not None grads_and_vars = opt.compute_gradients( self.train_loss, params, colocate_gradients_with_ops=hparams.colocate_gradients_with_ops) gradients = [x for (x, _) in grads_and_vars] clipped_grads, grad_norm = model_helper.gradient_clip( gradients, max_gradient_norm=hparams.max_gradient_norm) self.grad_norm = grad_norm self.params = params self.grads = clipped_grads self.update = opt.apply_gradients( list(zip(clipped_grads, params)), global_step=self.global_step) else: self.grad_norm = None self.update = None self.params = None self.grads = None def _get_learning_rate_warmup(self, hparams): """Get learning rate warmup.""" warmup_steps = hparams.warmup_steps warmup_scheme = hparams.warmup_scheme utils.print_out(" learning_rate=%g, warmup_steps=%d, warmup_scheme=%s" % (hparams.learning_rate, warmup_steps, warmup_scheme)) if not warmup_scheme: return self.learning_rate # Apply inverse decay if global steps less than warmup steps. # Inspired by https://arxiv.org/pdf/1706.03762.pdf (Section 5.3) # When step < warmup_steps, # learing_rate *= warmup_factor ** (warmup_steps - step) if warmup_scheme == "t2t": # 0.01^(1/warmup_steps): we start with a lr, 100 times smaller warmup_factor = tf.exp(tf.log(0.01) / warmup_steps) inv_decay = warmup_factor**(tf.to_float(warmup_steps - self.global_step)) else: raise ValueError("Unknown warmup scheme %s" % warmup_scheme) return tf.cond( self.global_step < hparams.warmup_steps, lambda: inv_decay * self.learning_rate, lambda: self.learning_rate, name="learning_rate_warump_cond") def _get_decay_info(self, hparams): """Return decay info based on decay_scheme.""" if hparams.decay_scheme in [ "luong5", "luong10", "luong234", "jamesqin1616" ]: epoch_size, _, _ = iterator_utils.get_effective_epoch_size(hparams) num_train_steps = int(hparams.max_train_epochs * epoch_size / hparams.batch_size) decay_factor = 0.5 if hparams.decay_scheme == "luong5": start_decay_step = int(num_train_steps / 2) decay_times = 5 remain_steps = num_train_steps - start_decay_step elif hparams.decay_scheme == "luong10": start_decay_step = int(num_train_steps / 2) decay_times = 10 remain_steps = num_train_steps - start_decay_step elif hparams.decay_scheme == "luong234": start_decay_step = int(num_train_steps * 2 / 3) decay_times = 4 remain_steps = num_train_steps - start_decay_step elif hparams.decay_scheme == "jamesqin1616": # dehao@ reported TPU setting max_epoch = 2 and use luong234. # They start decay after 2 * 2/3 epochs for 4 times. 
# If keep max_epochs = 8 then decay should start at 8 * 2/(3 * 4) epochs # and for (4 *4 = 16) times. decay_times = 16 start_decay_step = int(num_train_steps / 16.) remain_steps = num_train_steps - start_decay_step decay_steps = int(remain_steps / decay_times) elif not hparams.decay_scheme: # no decay start_decay_step = num_train_steps decay_steps = 0 decay_factor = 1.0 elif hparams.decay_scheme: raise ValueError("Unknown decay scheme %s" % hparams.decay_scheme) return start_decay_step, decay_steps, decay_factor def _get_learning_rate_decay(self, hparams): """Get learning rate decay.""" start_decay_step, decay_steps, decay_factor = self._get_decay_info(hparams) utils.print_out(" decay_scheme=%s, start_decay_step=%d, decay_steps %d, " "decay_factor %g" % (hparams.decay_scheme, start_decay_step, decay_steps, decay_factor)) return tf.cond( self.global_step < start_decay_step, lambda: self.learning_rate, lambda: tf.train.exponential_decay( # pylint: disable=g-long-lambda self.learning_rate, (self.global_step - start_decay_step), decay_steps, decay_factor, staircase=True), name="learning_rate_decay_cond") def init_embeddings(self, hparams, scope): """Init embeddings.""" self.embedding_encoder, self.embedding_decoder = ( model_helper.create_emb_for_encoder_and_decoder( share_vocab=hparams.share_vocab, src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, src_embed_size=self.num_units, tgt_embed_size=self.num_units, dtype=self.dtype, num_enc_partitions=hparams.num_enc_emb_partitions, num_dec_partitions=hparams.num_dec_emb_partitions, src_vocab_file=hparams.src_vocab_file, tgt_vocab_file=hparams.tgt_vocab_file, src_embed_file=hparams.src_embed_file, tgt_embed_file=hparams.tgt_embed_file, use_char_encode=hparams.use_char_encode, scope=scope, )) def build_graph(self, hparams, scope=None): """Subclass must implement this method. Creates a sequence-to-sequence model with dynamic RNN decoder API. Args: hparams: Hyperparameter configurations. scope: VariableScope for the created subgraph; default "dynamic_seq2seq". Returns: A tuple of the form (logits, loss_tuple, final_context_state, sample_id), where: logits: float32 Tensor [batch_size x num_decoder_symbols]. loss: loss = the total loss / batch_size. final_context_state: the final state of decoder RNN. sample_id: sampling indices. Raises: ValueError: if encoder_type differs from mono and bi, or attention_option is not (luong | scaled_luong | bahdanau | normed_bahdanau). """ utils.print_out("# Creating %s graph ..." % self.mode) # Projection with tf.variable_scope(scope or "build_network"): with tf.variable_scope("decoder/output_projection"): self.output_layer = tf.layers.Dense( self.tgt_vocab_size, use_bias=False, name="output_projection", dtype=self.dtype) with tf.variable_scope(scope or "dynamic_seq2seq", dtype=self.dtype): # Encoder if hparams.language_model: # no encoder for language modeling utils.print_out(" language modeling: no encoder") self.encoder_outputs = None encoder_state = None else: self.encoder_outputs, encoder_state = self._build_encoder(hparams) ## Decoder logits, sample_id = ( self._build_decoder(self.encoder_outputs, encoder_state, hparams)) ## Loss if self.mode != tf.contrib.learn.ModeKeys.INFER: loss = self._compute_loss(logits, hparams.label_smoothing) else: loss = tf.constant(0.0) return logits, loss, sample_id @abc.abstractmethod def _build_encoder(self, hparams): """Subclass must implement this. Build and run an RNN encoder. Args: hparams: Hyperparameters configurations. 
Returns: A tuple of encoder_outputs and encoder_state. """ pass def _get_infer_maximum_iterations(self, hparams, source_sequence_length): """Maximum decoding steps at inference time.""" if hparams.tgt_max_len_infer: maximum_iterations = hparams.tgt_max_len_infer utils.print_out(" decoding maximum_iterations %d" % maximum_iterations) else: # TODO(thangluong): add decoding_length_factor flag decoding_length_factor = 2.0 max_encoder_length = tf.reduce_max(source_sequence_length) maximum_iterations = tf.to_int32( tf.round(tf.to_float(max_encoder_length) * decoding_length_factor)) return maximum_iterations def _build_decoder(self, encoder_outputs, encoder_state, hparams): """Build and run a RNN decoder with a final projection layer. Args: encoder_outputs: The outputs of encoder for every time step. encoder_state: The final state of the encoder. hparams: The Hyperparameters configurations. Returns: A tuple of final logits and final decoder state: logits: size [time, batch_size, vocab_size] when time_major=True. """ ## Decoder. with tf.variable_scope("decoder") as decoder_scope: ## Train or eval if self.mode != tf.contrib.learn.ModeKeys.INFER: # [batch, time] target_input = self.features["target_input"] if self.time_major: # If using time_major mode, then target_input should be [time, batch] # then the decoder_emb_inp would be [time, batch, dim] target_input = tf.transpose(target_input) decoder_emb_inp = tf.cast( tf.nn.embedding_lookup(self.embedding_decoder, target_input), self.dtype) if not hparams.use_fused_lstm_dec: cell, decoder_initial_state = self._build_decoder_cell( hparams, encoder_outputs, encoder_state, self.features["source_sequence_length"]) if hparams.use_dynamic_rnn: final_rnn_outputs, _ = tf.nn.dynamic_rnn( cell, decoder_emb_inp, sequence_length=self.features["target_sequence_length"], initial_state=decoder_initial_state, dtype=self.dtype, scope=decoder_scope, parallel_iterations=hparams.parallel_iterations, time_major=self.time_major) else: final_rnn_outputs, _ = tf.contrib.recurrent.functional_rnn( cell, decoder_emb_inp, sequence_length=tf.to_int32( self.features["target_sequence_length"]), initial_state=decoder_initial_state, dtype=self.dtype, scope=decoder_scope, time_major=self.time_major, use_tpu=False) else: if hparams.pass_hidden_state: decoder_initial_state = encoder_state else: decoder_initial_state = tuple((tf.nn.rnn_cell.LSTMStateTuple( tf.zeros_like(s[0]), tf.zeros_like(s[1])) for s in encoder_state)) final_rnn_outputs = self._build_decoder_fused_for_training( encoder_outputs, decoder_initial_state, decoder_emb_inp, self.hparams) # We chose to apply the output_layer to all timesteps for speed: # 10% improvements for small models & 20% for larger ones. # If memory is a concern, we should apply output_layer per timestep. 
logits = self.output_layer(final_rnn_outputs) sample_id = None ## Inference else: cell, decoder_initial_state = self._build_decoder_cell( hparams, encoder_outputs, encoder_state, self.features["source_sequence_length"]) assert hparams.infer_mode == "beam_search" _, tgt_vocab_table = vocab_utils.create_vocab_tables( hparams.src_vocab_file, hparams.tgt_vocab_file, hparams.share_vocab) tgt_sos_id = tf.cast( tgt_vocab_table.lookup(tf.constant(hparams.sos)), tf.int32) tgt_eos_id = tf.cast( tgt_vocab_table.lookup(tf.constant(hparams.eos)), tf.int32) start_tokens = tf.fill([self.batch_size], tgt_sos_id) end_token = tgt_eos_id beam_width = hparams.beam_width length_penalty_weight = hparams.length_penalty_weight coverage_penalty_weight = hparams.coverage_penalty_weight my_decoder = beam_search_decoder.BeamSearchDecoder( cell=cell, embedding=self.embedding_decoder, start_tokens=start_tokens, end_token=end_token, initial_state=decoder_initial_state, beam_width=beam_width, output_layer=self.output_layer, length_penalty_weight=length_penalty_weight, coverage_penalty_weight=coverage_penalty_weight) # maximum_iteration: The maximum decoding steps. maximum_iterations = self._get_infer_maximum_iterations( hparams, self.features["source_sequence_length"]) # Dynamic decoding outputs, _, _ = tf.contrib.seq2seq.dynamic_decode( my_decoder, maximum_iterations=maximum_iterations, output_time_major=self.time_major, swap_memory=True, scope=decoder_scope) logits = tf.no_op() sample_id = outputs.predicted_ids return logits, sample_id def get_max_time(self, tensor): time_axis = 0 if self.time_major else 1 return tensor.shape[time_axis].value or tf.shape(tensor)[time_axis] @abc.abstractmethod def _build_decoder_cell(self, hparams, encoder_outputs, encoder_state, source_sequence_length): """Subclass must implement this. Args: hparams: Hyperparameters configurations. encoder_outputs: The outputs of encoder for every time step. encoder_state: The final state of the encoder. source_sequence_length: sequence length of encoder_outputs. Returns: A tuple of a multi-layer RNN cell used by decoder and the initial state of the decoder RNN. """ pass def _softmax_cross_entropy_loss(self, logits, labels, label_smoothing): """Compute softmax loss or sampled softmax loss.""" use_defun = os.environ["use_defun"] == "true" use_xla = os.environ["use_xla"] == "true" # @function.Defun(noinline=True, compiled=use_xla) def ComputePositiveCrossent(labels, logits): crossent = math_utils.sparse_softmax_crossent_with_logits( labels=labels, logits=logits) return crossent crossent = ComputePositiveCrossent(labels, logits) assert crossent.dtype == tf.float32 def _safe_shape_div(x, y): """Divides `x / y` assuming `x, y >= 0`, treating `0 / 0 = 0`.""" return x // tf.maximum(y, 1) @function.Defun(tf.float32, tf.float32, compiled=use_xla) def ReduceSumGrad(x, grad): """docstring.""" input_shape = tf.shape(x) # TODO(apassos) remove this once device placement for eager ops makes more # sense. 
with tf.colocate_with(input_shape): output_shape_kept_dims = math_ops.reduced_shape(input_shape, -1) tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims) grad = tf.reshape(grad, output_shape_kept_dims) return tf.tile(grad, tile_scaling) def ReduceSum(x): """docstring.""" return tf.reduce_sum(x, axis=-1) if use_defun: ReduceSum = function.Defun( tf.float32, compiled=use_xla, noinline=True, grad_func=ReduceSumGrad)(ReduceSum) if abs(label_smoothing) > 1e-3: # pylint:disable=invalid-name def ComputeNegativeCrossentFwd(logits): """docstring.""" # [time, batch, dim] # [time, batch] max_logits = tf.reduce_max(logits, axis=-1) # [time, batch, dim] shifted_logits = logits - tf.expand_dims(max_logits, axis=-1) # Always compute loss in fp32 shifted_logits = tf.to_float(shifted_logits) # [time, batch] log_sum_exp = tf.log(ReduceSum(tf.exp(shifted_logits))) # [time, batch, dim] - [time, batch, 1] --> reduce_sum(-1) --> # [time, batch] neg_crossent = ReduceSum( shifted_logits - tf.expand_dims(log_sum_exp, axis=-1)) return neg_crossent def ComputeNegativeCrossent(logits): return ComputeNegativeCrossentFwd(logits) if use_defun: ComputeNegativeCrossent = function.Defun( compiled=use_xla)(ComputeNegativeCrossent) neg_crossent = ComputeNegativeCrossent(logits) neg_crossent = tf.to_float(neg_crossent) num_labels = logits.shape[-1].value crossent = (1.0 - label_smoothing) * crossent - ( label_smoothing / tf.to_float(num_labels) * neg_crossent) # pylint:enable=invalid-name return crossent def _compute_loss(self, logits, label_smoothing): """Compute optimization loss.""" target_output = self.features["target_output"] if self.time_major: target_output = tf.transpose(target_output) max_time = self.get_max_time(target_output) self.batch_seq_len = max_time crossent = self._softmax_cross_entropy_loss( logits, target_output, label_smoothing) assert crossent.dtype == tf.float32 target_weights = tf.sequence_mask( self.features["target_sequence_length"], max_time, dtype=crossent.dtype) if self.time_major: # [time, batch] if time_major, since the crossent is [time, batch] in this # case. target_weights = tf.transpose(target_weights) loss = tf.reduce_sum(crossent * target_weights) / tf.to_float( self.batch_size) return loss def build_encoder_states(self, include_embeddings=False): """Stack encoder states and return tensor [batch, length, layer, size].""" assert self.mode == tf.contrib.learn.ModeKeys.INFER if include_embeddings: stack_state_list = tf.stack( [self.encoder_emb_inp] + self.encoder_state_list, 2) else: stack_state_list = tf.stack(self.encoder_state_list, 2) # transform from [length, batch, ...] -> [batch, length, ...] if self.time_major: stack_state_list = tf.transpose(stack_state_list, [1, 0, 2, 3]) return stack_state_list
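The warmup and decay logic above is easier to follow with concrete numbers. Below is a minimal NumPy sketch of the same schedule shape: a "t2t" warmup that starts at 1% of the base rate and ramps up over warmup_steps, followed by a staircase exponential decay as configured by _get_decay_info. All constants (base_lr, warmup_steps, decay points) are hypothetical illustration values, not defaults of this model.

# Minimal sketch of the warmup-then-decay schedule described above (illustrative constants only).
import numpy as np

def schedule(step, base_lr=1e-3, warmup_steps=200,
             start_decay_step=1000, decay_steps=500, decay_factor=0.5):
    if step < warmup_steps:
        # "t2t" warmup: warmup_factor = 0.01 ** (1 / warmup_steps), so step 0 uses base_lr * 0.01
        warmup_factor = np.exp(np.log(0.01) / warmup_steps)
        return base_lr * warmup_factor ** (warmup_steps - step)
    if step < start_decay_step:
        return base_lr
    # staircase decay, mirroring tf.train.exponential_decay(..., staircase=True)
    num_decays = (step - start_decay_step) // decay_steps
    return base_lr * decay_factor ** num_decays

print([round(schedule(s), 6) for s in (0, 100, 200, 999, 1000, 1500, 2000)])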
TensorFlow2/Classification/ConvNets/scripts/docker
docker
build
#!/bin/bash # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License set -euxo pipefail arg1=$1 # CONTAINER_TF2x_BASE="gitlab-master.nvidia.com:5005/dl/dgx/tensorflow" # CONTAINER_TF2x_TAG="21.10-tf2-py3-devel" CONTAINER_TF2x_BASE="nvcr.io/nvidia/tensorflow" CONTAINER_TF2x_TAG="21.09-tf2-py3" # ======================== Refresh base image ======================== # docker pull "${CONTAINER_TF2x_BASE}:${CONTAINER_TF2x_TAG}" # ========================== Build container ========================= # echo -e "\n\nBuilding Effnet_SavedModel Container\n\n" echo $arg1 sleep 1 # the image name is given by the user ($1). Example: nvcr.io/nvidian/efficientnet-tf2:v2-ga-tf2-py3 docker build -t "$arg1" \ --build-arg FROM_IMAGE_NAME="${CONTAINER_TF2x_BASE}:${CONTAINER_TF2x_TAG}" \ .
Tools/PyTorch/TimeSeriesPredictionPlatform/models
models
stat_models
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC import os import pmdarima as pm # import cuml import numpy as np from cuml.tsa.auto_arima import AutoARIMA as cuMLAutoArima import pickle as pkl class StatModel(ABC): def __init__(self, config): self.horizon = config.example_length - config.encoder_length self.config = config def fit(self, label, data): return def predict(self, data, i): return def save(self): return def load(self, path): return class AutoARIMA(StatModel): def __init__(self, config): super().__init__(config) self.models = [] def fit(self, label, data): self.model = pm.auto_arima(label, X=data) self.models.append(self.model) def predict(self, data, i): model = self.models[i] return model.predict(self.horizon, X=data) def save(self): with open('arima.pkl', 'wb') as f: pkl.dump(self.models, f) def load(self, path): with open(os.path.join(path, 'arima.pkl'), 'rb') as f: self.models = pkl.load(f) class CUMLAutoARIMA(StatModel): def __init__(self, config): super().__init__(config) self.models = [] def fit(self, label, data): self.model = cuMLAutoArima(label.astype(np.float64)) self.model.search() self.model.fit() self.models.append(self.model) def predict(self, data, i): model = self.models[i] return model.forecast(self.horizon).get() def save(self): with open('arima.pkl', 'wb') as f: pkl.dump(self.models, f) def load(self, path): with open(os.path.join(path, 'arima.pkl'), 'rb') as f: self.models = pkl.load(f)
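A short usage sketch of the AutoARIMA wrapper above may help: the class fits one pmdarima model per call to fit and indexes the fitted models by position at predict time, with the horizon derived from example_length minus encoder_length. The config fields, the module path models.stat_models, and the synthetic series below are assumptions for illustration, not part of the platform's documented API.

# Hedged usage sketch of the AutoARIMA wrapper (paths and config values are assumptions).
from types import SimpleNamespace
import numpy as np
from models.stat_models import AutoARIMA  # assumes this module is importable as models.stat_models

config = SimpleNamespace(example_length=28, encoder_length=21)  # horizon = 7
model = AutoARIMA(config)

# One independent ARIMA model is fit per series (per call to fit).
for _ in range(2):
    history = np.random.rand(100).cumsum()
    model.fit(label=history, data=None)

forecast = model.predict(data=None, i=0)  # 7-step forecast for series 0
model.save()                              # writes arima.pkl to the working directory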
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trtis
trtis
CMakeLists
## # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # include_directories(".") include_directories("${CMAKE_SOURCE_DIR}/trtis_sdk/include") link_directories("${CMAKE_SOURCE_DIR}/trtis_sdk/lib") file(GLOB trtis_sources *.cpp) configure_file(tt2i_trtis.ldscript tt2i_trtis.ldscript COPYONLY) add_library(tt2i_trtis SHARED ${trtis_sources}) target_include_directories(tt2i_trtis PRIVATE ) target_include_directories(tt2i_trtis PRIVATE "${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}" ../trt/ ../trt/util ../trt/tacotron2 ../trt/waveglow ../trt/denoiser ../trt/common ../extra ) target_link_libraries(tt2i_trtis tt2i "${LIBS}" custombackend) set_property(TARGET tt2i_trtis PROPERTY LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) set_target_properties( tt2i_trtis PROPERTIES LINK_FLAGS "-Wl,--version-script tt2i_trtis.ldscript" )
PyTorch/Segmentation/MaskRCNN/pytorch/notebooks
notebooks
pytorch_MaskRCNN_pyt_train_and_inference
#!/usr/bin/env python # coding: utf-8 # In[ ]: # Copyright 2019 NVIDIA Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;"> # # # Mask R-CNN Training and Inference Demo # ## Overview # # Mask R-CNN is a convolution-based neural network architecture for the task of object instance segmentation. The original paper describing the model can be found at [https://arxiv.org/abs/1703.06870](https://arxiv.org/abs/1703.06870). NVIDIA’s Mask R-CNN is an optimized version of [Facebook’s implementation](https://github.com/facebookresearch/maskrcnn-benchmark), leveraging mixed precision arithmetic and tensor cores for faster training while maintaining comparable accuracy with single precision (FP32) training. # # The major differences between the official implementation of the paper and our version of Mask R-CNN are as follows: # # - Mixed precision support with [PyTorch automatic mixed precision (AMP)](https://github.com/NVIDIA/apex). # - Gradient accumulation to simulate larger batches. # - Custom fused CUDA kernels for faster computations. # # Note: If the issue `DLLLoggerAlreadyInitialized:` encountered, # try again after restarting the notebook. # # # ### Learning objectives # # This notebook demonstrates the steps for training a Mask R-CNN model using 1 or multiple GPUs. We then employ the trained model to make inference on new images. # # ## Content # 1. [Requirements](#1) # 1. [Data download and preprocessing](#2) # 1. [Training](#3) # 1. [Testing trained model](#4) # # <a id="1"></a> # ## 1. Requirements # # # ### 1.1 Docker container # The most convenient way to make use of the NVIDIA Mask R-CNN model is via a docker container, which provides a self-contained, isolated and re-producible environment for all experiments. Refer to the [Quick Start Guide section](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Segmentation/MaskRCNN#requirements) of the Readme documentation for a comprehensive guide. We briefly summarize the steps here. # # First, clone the repository: # # ``` # git clone https://github.com/NVIDIA/DeepLearningExamples.git # cd DeepLearningExamples/PyTorch/Segmentation/MaskRCNN # ``` # # Next, build the NVIDIA Mask R-CNN container: # # ``` # cd pytorch # docker build --rm -t nvidia_joc_maskrcnn_pt . # ``` # # Then launch the container with: # # ``` # PATH_TO_COCO='/path/to/coco-2014' # MOUNT_LOCATION='/datasets/data' # NAME='nvidia_maskrcnn' # # docker run --it --runtime=nvidia -p 8888:8888 -v $PATH_TO_COCO:/$MOUNT_LOCATION --rm --name=$NAME --shm-size=10g --ulimit memlock=-1 --ulimit stack=67108864 --ipc=host nvidia_joc_maskrcnn_pt # ``` # where `/path/to/coco-2014` is the path on the host machine where the data was/is to be downloaded. More on data set preparation in the next section. 
# # Within the docker interactive bash session, start Jupyter with # # ``` # jupyter notebook --ip 0.0.0.0 --port 8888 # ``` # # Then open the Jupyter GUI interface on your host machine at http://localhost:8888. Within the container, this notebook itself is located at `/workspace/object_detection/demo`. # # ### 1.2 Hardware # This notebook can be executed on any CUDA-enabled NVIDIA GPU, although for efficient mixed precision training, a [Tensor Core NVIDIA GPU](https://www.nvidia.com/en-us/data-center/tensorcore/) is desired (Volta, Turing or newer architectures). # In[ ]: get_ipython().system('nvidia-smi') get_ipython().system('pip install ipywidgets') get_ipython().system('jupyter nbextension enable --py widgetsnbextension') # <a id="2"></a> # ## 2. Data download and preprocessing # # This notebook demonstrates training and validation of the Mask R-CNN model on the [COCO 2014 dataset](http://cocodataset.org/#download). If not already available locally, the following [script](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/MaskRCNN/download_dataset.sh) in the repository provides a convenient way to download and extract all the necessary data in one go. Be mindful of the size of the raw data (~20GB). The script makes use of `wget` and will automatically resume if disrupted. Once downloaded, the script invokes `dtrx` to extract the data. # In[ ]: get_ipython().system(' wget https://raw.githubusercontent.com/NVIDIA/DeepLearningExamples/master/PyTorch/Segmentation/MaskRCNN/download_dataset.sh -P /workspace/object_detection/') get_ipython().system(' bash /workspace/object_detection/download_dataset.sh /datasets/data') # Within the docker container, the final data directory should look like: # # ``` # /datasets/data # annotations/ # instances_train2014.json # instances_val2014.json # train2014/ # COCO_train2014_*.jpg # val2014/ # COCO_val2014_*.jpg # ``` # <a id="3"></a> # ## 3. Training # The shell script [train.sh](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/MaskRCNN/pytorch/scripts/train.sh) provides a convenient interface to launch training tasks. # By default, invoking [train.sh](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/MaskRCNN/pytorch/scripts/train.sh) will make use of 8 GPUs, saves checkpoints every 2500 iterations and uses mixed precision training. # # ``` # cd /workspace/object_detection/ # bash scripts/train.sh # ``` # Note that, within [train.sh](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/MaskRCNN/pytorch/scripts/train.sh), it invokes the following Python command: # # ```python -m torch.distributed.launch --nproc_per_node=8 tools/train_net.py --config-file "configs/e2e_mask_rcnn_R_50_FPN_1x.yaml" DTYPE "float16"``` # # which launches pytorch distributed training with 8 GPUs, using the train script in [tools/train_net.py](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/MaskRCNN/pytorch/tools/train_net.py). Various sample training configurations are available within the [configs](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Segmentation/MaskRCNN/pytorch/configs) directory, for example, a [configuration file](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/MaskRCNN/pytorch/configs/e2e_mask_rcnn_R_50_FPN_1x_1GPU.yaml) for training using 1 GPU. 
# # ### 3.1 Training with 1 GPU # We will now take a closer look at training a Mask-RCNN model using 1 GPU, using the below custom config script. # In[ ]: get_ipython().run_cell_magic('bash', '', 'echo \'MODEL:\n META_ARCHITECTURE: "GeneralizedRCNN"\n WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50"\n BACKBONE:\n CONV_BODY: "R-50-FPN"\n OUT_CHANNELS: 256\n RPN:\n USE_FPN: True\n ANCHOR_STRIDE: (4, 8, 16, 32, 64)\n PRE_NMS_TOP_N_TRAIN: 2000\n PRE_NMS_TOP_N_TEST: 1000\n POST_NMS_TOP_N_TEST: 1000\n FPN_POST_NMS_TOP_N_TEST: 1000\n ROI_HEADS:\n USE_FPN: True\n ROI_BOX_HEAD:\n POOLER_RESOLUTION: 7\n POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n POOLER_SAMPLING_RATIO: 2\n FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"\n PREDICTOR: "FPNPredictor"\n ROI_MASK_HEAD:\n POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor"\n PREDICTOR: "MaskRCNNC4Predictor"\n POOLER_RESOLUTION: 14\n POOLER_SAMPLING_RATIO: 2\n RESOLUTION: 28\n SHARE_BOX_FEATURE_EXTRACTOR: False\n MASK_ON: True\nDATASETS:\n TRAIN: ("coco_2014_train", "coco_2014_valminusminival")\n TEST: ("coco_2014_minival",)\nDATALOADER:\n SIZE_DIVISIBILITY: 32\nSOLVER:\n BASE_LR: 0.005\n WEIGHT_DECAY: 0.0001\n STEPS: (240000, 320000)\n MAX_ITER: 360000\n IMS_PER_BATCH: 4\nTEST:\n IMS_PER_BATCH: 16\n\' > /workspace/object_detection/configs/custom_config.yml\n') # Various configurable options within the above configuration file can be modified, for example, the learning rate `BASE_LR`, training batch size `IMS_PER_BATCH` or number of train iterations `MAX_ITER`. The training process will start from a pre-trained Resnet-50 backbone model downloaded from `https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl`. # # #### Training with full precision (FP32) # Next, we launch the training script using the [custom configuration script](../configs/custom_config.yml) just created above. A full training cycle with 360000 iterations on the COCO-2014 data on a single GPU might take as much as 1.5 days on an NVIDIA V100 GPU. # In[ ]: get_ipython().run_line_magic('run', '../tools/train_net.py --config-file "/workspace/object_detection/configs/custom_config.yml" DTYPE "float32" OUTPUT_DIR ./results/1GPU-FP32/') # Upon completion, the final model is saved to `./results/1GPU-FP32/model_final.pth`. # #### Training with mixed-precision # Next, we launch the training script using the same [custom configuration script](../configs/custom_config.yml) created above. # In[ ]: get_ipython().run_line_magic('run', '../tools/train_net.py --config-file "/workspace/object_detection/configs/custom_config.yml" DTYPE "float16" OUTPUT_DIR ./results/1GPU-FP16/') # On compatible NVIDIA GPUs, the training script makes use of the Tensor cores for higher FP16 arithmetic throughput. On a V100 GPU, this shortens the training time by about 30%. # ### 3.2 Training with 8 GPUs # We will now configure a training script to train a Mask-RCNN model using 8 GPUs. Thanks to having 8 GPUs, we can reduce the number of training iterations by a factor of 8, while the learning rate is also increased by 8 times to account for the larger global batch size. 
# In[ ]: get_ipython().run_cell_magic('bash', '', 'echo \'MODEL:\n META_ARCHITECTURE: "GeneralizedRCNN"\n WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50"\n BACKBONE:\n CONV_BODY: "R-50-FPN"\n OUT_CHANNELS: 256\n RPN:\n USE_FPN: True\n ANCHOR_STRIDE: (4, 8, 16, 32, 64)\n PRE_NMS_TOP_N_TRAIN: 2000\n PRE_NMS_TOP_N_TEST: 1000\n POST_NMS_TOP_N_TEST: 1000\n FPN_POST_NMS_TOP_N_TEST: 1000\n FPN_POST_NMS_TOP_N_TRAIN: 4000\n ROI_HEADS:\n USE_FPN: True\n ROI_BOX_HEAD:\n POOLER_RESOLUTION: 7\n POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n POOLER_SAMPLING_RATIO: 2\n FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"\n PREDICTOR: "FPNPredictor"\n ROI_MASK_HEAD:\n POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor"\n PREDICTOR: "MaskRCNNC4Predictor"\n POOLER_RESOLUTION: 14\n POOLER_SAMPLING_RATIO: 2\n RESOLUTION: 28\n SHARE_BOX_FEATURE_EXTRACTOR: False\n MASK_ON: True\nDATASETS:\n TRAIN: ("coco_2014_train", "coco_2014_valminusminival")\n TEST: ("coco_2014_minival",)\nDATALOADER:\n SIZE_DIVISIBILITY: 32\nSOLVER:\n BASE_LR: 0.04\n WEIGHT_DECAY: 0.0001\n STEPS: (36000, 48000)\n MAX_ITER: 50000\n IMS_PER_BATCH: 32\nTEST:\n IMS_PER_BATCH: 8\n\' > /workspace/object_detection/configs/custom_config_8_GPUs.yml\n') # #### Training with full precision # In[ ]: get_ipython().run_line_magic('run', '-m torch.distributed.launch -- --nproc_per_node=8 ../tools/train_net.py --config-file "/workspace/object_detection/configs/custom_config_8_GPUs.yml" DTYPE "float32" OUTPUT_DIR ./results/8GPU-FP32/') # If the Jupyter graphical interface does not update the training progress on the fly, you can observe the information being printed in the shell window from where you launched Jupyter. # #### Training with mixed-precision # We now launch the training process using mixed precision. Observe the information being printed in the shell window from where you launched Jupyter. # In[ ]: get_ipython().run_line_magic('run', '-m torch.distributed.launch -- --nproc_per_node=8 ../tools/train_net.py --config-file "/workspace/object_detection/configs/custom_config_8_GPUs.yml" DTYPE "float16" OUTPUT_DIR ./results/8GPU-FP16/') # <a id="4"></a> # ## 4. Testing trained model # # After model training has completed, we can test the trained model against the COCO-2014 validation set. First, we create a new configuration file for the test. Note: you must point the model `WEIGHT` parameter to a final model checkpoint, e.g. `./results/8GPU-FP32/model_final.pth`. 
# In[ ]: get_ipython().run_cell_magic('bash', '', 'echo \'MODEL:\n META_ARCHITECTURE: "GeneralizedRCNN"\n WEIGHT: "./results/8GPU-FP16/model_final.pth"\n BACKBONE:\n CONV_BODY: "R-50-FPN"\n OUT_CHANNELS: 256\n RPN:\n USE_FPN: True\n ANCHOR_STRIDE: (4, 8, 16, 32, 64)\n PRE_NMS_TOP_N_TRAIN: 2000\n PRE_NMS_TOP_N_TEST: 1000\n POST_NMS_TOP_N_TEST: 1000\n FPN_POST_NMS_TOP_N_TEST: 1000\n ROI_HEADS:\n USE_FPN: True\n ROI_BOX_HEAD:\n POOLER_RESOLUTION: 7\n POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n POOLER_SAMPLING_RATIO: 2\n FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"\n PREDICTOR: "FPNPredictor"\n ROI_MASK_HEAD:\n POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)\n FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor"\n PREDICTOR: "MaskRCNNC4Predictor"\n POOLER_RESOLUTION: 14\n POOLER_SAMPLING_RATIO: 2\n RESOLUTION: 28\n SHARE_BOX_FEATURE_EXTRACTOR: False\n MASK_ON: True\nDATASETS:\n TRAIN: ("coco_2014_train", "coco_2014_valminusminival")\n TEST: ("coco_2014_minival",)\nDATALOADER:\n SIZE_DIVISIBILITY: 32\nSOLVER:\n BASE_LR: 0.005\n WEIGHT_DECAY: 0.0001\n STEPS: (240000, 360000)\n MAX_ITER: 360000\n IMS_PER_BATCH: 4\nTEST:\n IMS_PER_BATCH: 16\n\' > /workspace/object_detection/configs/test_custom_config.yml\n') # ### Validating on the COCO-2014 mini evaluation data set # Next, we launch the evaluation script, which will read the COCO-2014 mini evaluation dataset of 5000 images and evaluate various quality metrics, such as recall, precision and IoU at various thresholds. # In[ ]: get_ipython().run_line_magic('run', '../tools/test_net.py --config-file /workspace/object_detection/configs/test_custom_config.yml DTYPE "float16" DATASETS.TEST "(\\"coco_2014_minival\\",)" OUTPUT_DIR ./results/8GPU-FP16/evaluation TEST.IMS_PER_BATCH 1') # ### Testing on new images # # We will now launch an interactive testing, where you can load new test images. First, we load some required libraries and define some helper functions to load images. # In[23]: import matplotlib.pyplot as plt import matplotlib.pylab as pylab import requests from io import BytesIO from PIL import Image import numpy as np # this makes our figures bigger pylab.rcParams['figure.figsize'] = 20, 20 from maskrcnn_benchmark.config import cfg import sys,os sys.path.append(os.path.realpath('..')) from demo.predictor import COCODemo def load(url): """ Given an url of an image, downloads the image and returns a PIL image """ response = requests.get(url) pil_image = Image.open(BytesIO(response.content)).convert("RGB") # convert to BGR format image = np.array(pil_image)[:, :, [2, 1, 0]] return image def imshow(img): plt.imshow(img[:, :, [2, 1, 0]]) plt.axis("off") # Next, we load the trained model specified in the test configuration file, e.g. from `./results/8GPU-FP32/model_final.pth`. # In[ ]: config_file = "/workspace/object_detection/configs/test_custom_config.yml" # update the config options with the config file cfg.merge_from_file(config_file) # manual override some options cfg.merge_from_list(["MODEL.DEVICE", "cuda"]) coco_demo = COCODemo( cfg, min_image_size=800, confidence_threshold=0.7, ) # User now can load a test image from any public URL. # In[21]: # from http://cocodataset.org/#explore?id=345434 image = load("http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg") imshow(image) # The prediction result is then displayed. 
# In[22]: # compute predictions predictions = coco_demo.run_on_opencv_image(image) imshow(predictions) # # Conclusion # # In this notebook, we have walked through the complete process of preparing the container and data required for training Mask R-CNN models. We have also investigated various training options, trained and tested Mask R-CNN models with various configurations. # # ## What's next # Now it's time to try Mask R-CNN on your own data. Observe the speed-up from mixed precision training and compare the final accuracy of models trained with FP32 and mixed precision. # # In[ ]:
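Beyond public URLs, the same predictor can be run on a local file. The sketch below assumes coco_demo was built in the cells above; the input and output paths are hypothetical placeholders. Note that cv2.imread already returns BGR, which is the channel order run_on_opencv_image expects.

# Hedged sketch: run the predictor from the cells above on a local image (hypothetical paths).
import cv2

local_image = cv2.imread("/datasets/data/val2014/some_image.jpg")  # BGR, as expected by the predictor
if local_image is None:
    raise FileNotFoundError("Update the path to point at an existing image file.")
overlay = coco_demo.run_on_opencv_image(local_image)
cv2.imwrite("./results/prediction_overlay.jpg", overlay)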
CUDA-Optimized/FastSpeech/fastspeech
fastspeech
infer
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import fire from fastspeech import hparam as hp, DEFAULT_DEVICE from fastspeech.dataset.ljspeech_dataset import LJSpeechDataset from fastspeech.inferencer.fastspeech_inferencer import FastSpeechInferencer from fastspeech.model.fastspeech import Fastspeech from fastspeech.data_load import PadDataLoader from fastspeech.utils.logging import tprint import torch import pprint from fastspeech.utils.time import TimeElapsed # import multiprocessing # multiprocessing.set_start_method('spawn', True) pp = pprint.PrettyPrinter(indent=4, width=1000) def infer(hparam="infer.yaml", device=DEFAULT_DEVICE, n_iters=1, **kwargs): """ The FastSpeech model inference script. By default, this script assumes to load parameters in the default config file, fastspeech/hparams/infer.yaml. Besides the flags, you can also set parameters in the config file via the command-line. For examples, --dataset_path=DATASET_PATH Path to dataset directory. --checkpoint_path=CHECKPOINT_PATH Path to checkpoint directory. The latest checkpoint will be loaded. --batch_size=BATCH_SIZE Batch size to use. Defaults to 1. Refer to fastspeech/hparams/infer.yaml to see more parameters. Args: hparam (str, optional): Path to default config file. Defaults to "infer.yaml". device (str, optional): Device to use. Defaults to "cuda" if avaiable, or "cpu". n_iters (int, optional): Number of batches to infer. Defaults to 1. 
""" hp.set_hparam(hparam, kwargs) tprint("Hparams:\n{}".format(pp.pformat(hp))) tprint("Device count: {}".format(torch.cuda.device_count())) # model model = Fastspeech( max_seq_len=hp.max_seq_len, d_model=hp.d_model, phoneme_side_n_layer=hp.phoneme_side_n_layer, phoneme_side_head=hp.phoneme_side_head, phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size, phoneme_side_output_size=hp.phoneme_side_output_size, mel_side_n_layer=hp.mel_side_n_layer, mel_side_head=hp.mel_side_head, mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size, mel_side_output_size=hp.mel_side_output_size, duration_predictor_filter_size=hp.duration_predictor_filter_size, duration_predictor_kernel_size=hp.duration_predictor_kernel_size, fft_conv1d_kernel=hp.fft_conv1d_kernel, fft_conv1d_padding=hp.fft_conv1d_padding, dropout=hp.dropout, n_mels=hp.num_mels, fused_layernorm=hp.fused_layernorm ) dataset = LJSpeechDataset(root_path=hp.dataset_path, meta_file=hp.meta_file, sr=hp.sr, n_fft=hp.n_fft, win_len=hp.win_len, hop_len=hp.hop_len, n_mels=hp.num_mels, mel_fmin=hp.mel_fmin, mel_fmax=hp.mel_fmax, exclude_mels=True, sort_by_length=True if hp.use_trt and hp.trt_multi_engine else False ) tprint("Dataset size: {}".format(len(dataset))) data_loader = PadDataLoader(dataset, batch_size=hp.batch_size, num_workers=hp.n_workers, shuffle=False if hp.use_trt and hp.trt_multi_engine else True, drop_last=True, ) inferencer = get_inferencer(model, data_loader, device) try: n_iters = min(len(data_loader), n_iters) if n_iters else len(data_loader) tprint("Num of iters: {}".format(n_iters)) with inferencer: for i in range(n_iters): tprint("------------- INFERENCE : batch #{} -------------".format(i)) with TimeElapsed(name="Inference Time", cuda_sync=True): out_batch = inferencer.infer() # tprint("Output:\n{}".format(pp.pformat(out_batch))) tprint("Inference has been done.") except KeyboardInterrupt: tprint("Inference has been canceled.") def get_inferencer(model, data_loader, device): if hp.use_trt: if hp.trt_multi_engine: from fastspeech.trt.fastspeech_trt_multi_engine_inferencer import FastSpeechTRTMultiEngineInferencer inferencer = FastSpeechTRTMultiEngineInferencer('fastspeech', model, data_loader=data_loader, ckpt_path=hp.checkpoint_path, trt_max_ws_size=hp.trt_max_ws_size, trt_force_build=hp.trt_force_build, use_fp16=hp.use_fp16, trt_file_path_list=hp.trt_file_path_list, trt_max_input_seq_len_list=hp.trt_max_input_seq_len_list, trt_max_output_seq_len_list=hp.trt_max_output_seq_len_list, ) else: from fastspeech.trt.fastspeech_trt_inferencer import FastSpeechTRTInferencer inferencer = FastSpeechTRTInferencer('fastspeech', model, data_loader=data_loader, ckpt_path=hp.checkpoint_path, trt_max_ws_size=hp.trt_max_ws_size, trt_file_path=hp.trt_file_path, use_fp16=hp.use_fp16, trt_force_build=hp.trt_force_build, trt_max_input_seq_len=hp.trt_max_input_seq_len, trt_max_output_seq_len=hp.trt_max_output_seq_len, ) else: inferencer = FastSpeechInferencer( 'fastspeech', model, data_loader=data_loader, ckpt_path=hp.checkpoint_path, log_path=hp.log_path, device=device, use_fp16=hp.use_fp16) return inferencer if __name__ == '__main__': torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = False fire.Fire(infer)
PyTorch/Classification/GPUNet/triton/runner
runner
finalizer
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import pathlib from typing import Dict, List # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .experiment import ExperimentResult from .logger import LOGGER from .stages import ResultsType from .summary import load_results, save_summary from .task import Task class Finalizer(abc.ABC): @abc.abstractmethod def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]): pass class ExperimentFinalizer(Finalizer): """ Public runner finalizer object. """ def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]): results_path = workspace / task.results_dir self._generate_summary(results_path, results) self._finalize_task(results_path, task) def _finalize_task(self, results_path: pathlib.Path, task: Task) -> None: """ Finalize task information Args: task: Task object Returns: None """ task.end() file_path = results_path / task.filename LOGGER.debug(f"Saving task details to file {file_path}") task.to_file(file_path) LOGGER.debug("Done") LOGGER.info(f"Task details and results stored in {results_path}") def _generate_summary(self, results_path: pathlib.Path, experiment_results: List[ExperimentResult]): """ Generate summary for results collected in all experiments Args: results_path: Path where results should be stored experiment_results: Results collected from experiments Returns: """ performance_offline_results = list() performance_online_results = list() results_mapping = { ResultsType.TRITON_PERFORMANCE_OFFLINE: performance_offline_results, ResultsType.TRITON_PERFORMANCE_ONLINE: performance_online_results, } self._collect_summary_results(experiment_results, results_mapping) self._prepare_final_results(results_path, results_mapping) def _collect_summary_results(self, experiment_results: List[ExperimentResult], results_mapping: Dict): for experiment_result in experiment_results: experiment = experiment_result.experiment for result_type, result_path in experiment_result.results.items(): if not result_path.is_file() and not result_path.is_dir(): raise FileNotFoundError(f"Expected file {result_path} not found") LOGGER.debug(f"Found {result_type} in {result_path} file.") if result_type not in results_mapping: LOGGER.debug(f"Results {result_type} for {experiment.experiment_id} are ignored in final summary.") return LOGGER.debug(f"Collecting {result_type} results from {result_path} for summary") result = load_results( results_path=result_path, parameters=experiment.parameters, result_type=result_type, ) results_mapping[result_type].extend(result) LOGGER.debug("Done.") def _prepare_final_results(self, results_path: pathlib.Path, results_mapping: Dict) -> None: """ Prepare summary files for offline and online performance Args: results_path: Path where results should be stored results_mapping: Mapping with results type and collected 
results for given stage Returns: None """ for results_type, results in results_mapping.items(): save_summary( result_type=results_type, results=results, summary_dir=results_path, )
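Since Finalizer is an abstract base class, alternative post-processing can be plugged in by overriding exec. The sketch below keeps only the task-file step and skips the performance summaries; the import paths (triton.runner.*) are assumptions about how the package is exposed and may differ in practice.

# Hedged sketch of a minimal Finalizer subclass (import paths are assumptions).
import pathlib
from typing import List

from triton.runner.experiment import ExperimentResult
from triton.runner.finalizer import Finalizer
from triton.runner.task import Task


class TaskOnlyFinalizer(Finalizer):
    """Persists task metadata but skips summary generation."""

    def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]):
        results_path = workspace / task.results_dir
        task.end()
        task.to_file(results_path / task.filename)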
TensorFlow/LanguageModeling/BERT/scripts/configs
configs
pretrain_config
#!/usr/bin/env bash # Full LAMB pretraining configs for NVIDIA DGX A100 (8x NVIDIA A100 40GB GPU) dgxa100_8gpu_fp16 () { train_batch_size_phase1=64 train_batch_size_phase2=16 eval_batch_size=8 learning_rate_phase1="7.5e-4" learning_rate_phase2="5e-4" precision="fp16" use_xla="true" num_gpus=8 echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus } dgxa100_8gpu_tf32 () { train_batch_size_phase1=64 train_batch_size_phase2=8 eval_batch_size=8 learning_rate_phase1="7.5e-4" learning_rate_phase2="5e-4" precision="tf32" use_xla="true" num_gpus=8 echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus } # Full LAMB pretraining configs for NVIDIA DGX-2H (16x NVIDIA V100 32GB GPU) dgx2_16gpu_fp16 () { train_batch_size_phase1=64 train_batch_size_phase2=8 eval_batch_size=8 learning_rate_phase1="3.75e-4" learning_rate_phase2="2.5e-4" precision="fp16" use_xla="true" num_gpus=16 echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus } dgx2_16gpu_fp32 () { train_batch_size_phase1=32 train_batch_size_phase2=8 eval_batch_size=8 learning_rate_phase1="3.75e-4" learning_rate_phase2="2.5e-4" precision="fp32" use_xla="true" num_gpus=16 echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus } # Full LAMB pretraining configs for NVIDIA DGX-1 (8x NVIDIA V100 16GB GPU) dgx1_8gpu_fp16 () { train_batch_size_phase1=16 train_batch_size_phase2=4 eval_batch_size=8 learning_rate_phase1="7.5e-4" learning_rate_phase2="5e-4" precision="fp16" use_xla="true" num_gpus=8 echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus } dgx1_8gpu_fp32 () { train_batch_size_phase1=8 train_batch_size_phase2=2 eval_batch_size=8 learning_rate_phase1="7.5e-4" learning_rate_phase2="5e-4" precision="fp32" use_xla="true" num_gpus=8 echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus }
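Each config function prints its eight values on a single line in a fixed order, which is how downstream launch scripts consume them. The following is a hedged Python sketch of reading one configuration; the script path is an assumption about where this file lives relative to the working directory.

# Hedged sketch: source the config script and map the echoed values to names.
import subprocess

output = subprocess.run(
    ["bash", "-c", "source scripts/configs/pretrain_config.sh && dgxa100_8gpu_fp16"],
    capture_output=True, text=True, check=True,
).stdout.split()

field_names = [
    "train_batch_size_phase1", "train_batch_size_phase2", "eval_batch_size",
    "learning_rate_phase1", "learning_rate_phase2", "precision", "use_xla", "num_gpus",
]
config = dict(zip(field_names, output))
print(config)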
TensorFlow/Segmentation/UNet_Industrial/scripts
scripts
UNet_8GPU_XLA
#!/usr/bin/env bash # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script launches UNet training in FP32 on 8 GPUs using 16 batch size (2 per GPU) # Usage ./UNet_FP32_8GPU_XLA.sh <path to result repository> <path to dataset> <dagm classID (1-10)> BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" export TF_CPP_MIN_LOG_LEVEL=3 mpirun \ -np 8 \ -H localhost:8 \ -bind-to none \ -map-by slot \ -x NCCL_DEBUG=VERSION \ -x LD_LIBRARY_PATH \ -x PATH \ -mca pml ob1 -mca btl ^openib \ --allow-run-as-root \ python "${BASEDIR}/../main.py" \ --unet_variant='tinyUNet' \ --activation_fn='relu' \ --exec_mode='train_and_evaluate' \ --iter_unit='batch' \ --num_iter=2500 \ --batch_size=2 \ --warmup_step=10 \ --results_dir="${1}" \ --data_dir="${2}" \ --dataset_name='DAGM2007' \ --dataset_classID="${3}" \ --data_format='NCHW' \ --use_auto_loss_scaling \ --noamp \ --xla \ --learning_rate=1e-4 \ --learning_rate_decay_factor=0.8 \ --learning_rate_decay_steps=500 \ --rmsprop_decay=0.9 \ --rmsprop_momentum=0.8 \ --loss_fn_name='adaptive_loss' \ --weight_decay=1e-5 \ --weight_init_method='he_uniform' \ --augment_data \ --display_every=250 \ --debug_verbosity=0
TensorFlow/Detection/SSD/models/research/slim/nets
nets
dcgan_test
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for dcgan.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from nets import dcgan class DCGANTest(tf.test.TestCase): def test_generator_run(self): tf.set_random_seed(1234) noise = tf.random_normal([100, 64]) image, _ = dcgan.generator(noise) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) image.eval() def test_generator_graph(self): tf.set_random_seed(1234) # Check graph construction for a number of image size/depths and batch # sizes. for i, batch_size in zip(xrange(3, 7), xrange(3, 8)): tf.reset_default_graph() final_size = 2 ** i noise = tf.random_normal([batch_size, 64]) image, end_points = dcgan.generator( noise, depth=32, final_size=final_size) self.assertAllEqual([batch_size, final_size, final_size, 3], image.shape.as_list()) expected_names = ['deconv%i' % j for j in xrange(1, i)] + ['logits'] self.assertSetEqual(set(expected_names), set(end_points.keys())) # Check layer depths. for j in range(1, i): layer = end_points['deconv%i' % j] self.assertEqual(32 * 2**(i-j-1), layer.get_shape().as_list()[-1]) def test_generator_invalid_input(self): wrong_dim_input = tf.zeros([5, 32, 32]) with self.assertRaises(ValueError): dcgan.generator(wrong_dim_input) correct_input = tf.zeros([3, 2]) with self.assertRaisesRegexp(ValueError, 'must be a power of 2'): dcgan.generator(correct_input, final_size=30) with self.assertRaisesRegexp(ValueError, 'must be greater than 8'): dcgan.generator(correct_input, final_size=4) def test_discriminator_run(self): image = tf.random_uniform([5, 32, 32, 3], -1, 1) output, _ = dcgan.discriminator(image) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output.eval() def test_discriminator_graph(self): # Check graph construction for a number of image size/depths and batch # sizes. for i, batch_size in zip(xrange(1, 6), xrange(3, 8)): tf.reset_default_graph() img_w = 2 ** i image = tf.random_uniform([batch_size, img_w, img_w, 3], -1, 1) output, end_points = dcgan.discriminator( image, depth=32) self.assertAllEqual([batch_size, 1], output.get_shape().as_list()) expected_names = ['conv%i' % j for j in xrange(1, i+1)] + ['logits'] self.assertSetEqual(set(expected_names), set(end_points.keys())) # Check layer depths. 
for j in range(1, i+1): layer = end_points['conv%i' % j] self.assertEqual(32 * 2**(j-1), layer.get_shape().as_list()[-1]) def test_discriminator_invalid_input(self): wrong_dim_img = tf.zeros([5, 32, 32]) with self.assertRaises(ValueError): dcgan.discriminator(wrong_dim_img) spatially_undefined_shape = tf.placeholder(tf.float32, [5, 32, None, 3]) with self.assertRaises(ValueError): dcgan.discriminator(spatially_undefined_shape) not_square = tf.zeros([5, 32, 16, 3]) with self.assertRaisesRegexp(ValueError, 'not have equal width and height'): dcgan.discriminator(not_square) not_power_2 = tf.zeros([5, 30, 30, 3]) with self.assertRaisesRegexp(ValueError, 'not a power of 2'): dcgan.discriminator(not_power_2) if __name__ == '__main__': tf.test.main()
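The depth assertions in the generator test follow a simple doubling pattern that is easy to check by hand. A small standalone sketch of that arithmetic (the depth and final_size values are arbitrary examples):

# Standalone check of the channel counts the generator test asserts above:
# deconv j of a generator with depth=32 and final_size=2**i has 32 * 2**(i - j - 1) channels.
depth, i = 32, 5  # final_size = 2**5 = 32
for j in range(1, i):
    print(f"deconv{j}: {depth * 2 ** (i - j - 1)} channels")
print("logits: 3 channels (RGB image)")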
PyTorch/Forecasting/TFT/triton/runner/maintainer/docker
docker
__init__
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
PyTorch/Classification/GPUNet/triton/05ms-D/runner
runner
start_NVIDIA-DGX-1-(1x-V100-32GB)
#!/bin/bash # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Evaluate Runner python3 -m "triton.05ms-D.runner.__main__" \ --config-path "triton/05ms-D/runner/config_NVIDIA-DGX-1-(1x-V100-32GB).yaml" \ --device 0
TensorFlow/Detection/SSD/models/research/object_detection/builders
builders
image_resizer_builder_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.builders.image_resizer_builder.""" import numpy as np import tensorflow as tf from google.protobuf import text_format from object_detection.builders import image_resizer_builder from object_detection.protos import image_resizer_pb2 class ImageResizerBuilderTest(tf.test.TestCase): def _shape_of_resized_random_image_given_text_proto(self, input_shape, text_proto): image_resizer_config = image_resizer_pb2.ImageResizer() text_format.Merge(text_proto, image_resizer_config) image_resizer_fn = image_resizer_builder.build(image_resizer_config) images = tf.to_float( tf.random_uniform(input_shape, minval=0, maxval=255, dtype=tf.int32)) resized_images, _ = image_resizer_fn(images) with self.test_session() as sess: return sess.run(resized_images).shape def test_build_keep_aspect_ratio_resizer_returns_expected_shape(self): image_resizer_text_proto = """ keep_aspect_ratio_resizer { min_dimension: 10 max_dimension: 20 } """ input_shape = (50, 25, 3) expected_output_shape = (20, 10, 3) output_shape = self._shape_of_resized_random_image_given_text_proto( input_shape, image_resizer_text_proto) self.assertEqual(output_shape, expected_output_shape) def test_build_keep_aspect_ratio_resizer_grayscale(self): image_resizer_text_proto = """ keep_aspect_ratio_resizer { min_dimension: 10 max_dimension: 20 convert_to_grayscale: true } """ input_shape = (50, 25, 3) expected_output_shape = (20, 10, 1) output_shape = self._shape_of_resized_random_image_given_text_proto( input_shape, image_resizer_text_proto) self.assertEqual(output_shape, expected_output_shape) def test_build_keep_aspect_ratio_resizer_with_padding(self): image_resizer_text_proto = """ keep_aspect_ratio_resizer { min_dimension: 10 max_dimension: 20 pad_to_max_dimension: true per_channel_pad_value: 3 per_channel_pad_value: 4 per_channel_pad_value: 5 } """ input_shape = (50, 25, 3) expected_output_shape = (20, 20, 3) output_shape = self._shape_of_resized_random_image_given_text_proto( input_shape, image_resizer_text_proto) self.assertEqual(output_shape, expected_output_shape) def test_built_fixed_shape_resizer_returns_expected_shape(self): image_resizer_text_proto = """ fixed_shape_resizer { height: 10 width: 20 } """ input_shape = (50, 25, 3) expected_output_shape = (10, 20, 3) output_shape = self._shape_of_resized_random_image_given_text_proto( input_shape, image_resizer_text_proto) self.assertEqual(output_shape, expected_output_shape) def test_built_fixed_shape_resizer_grayscale(self): image_resizer_text_proto = """ fixed_shape_resizer { height: 10 width: 20 convert_to_grayscale: true } """ input_shape = (50, 25, 3) expected_output_shape = (10, 20, 1) output_shape = self._shape_of_resized_random_image_given_text_proto( input_shape, image_resizer_text_proto) self.assertEqual(output_shape, expected_output_shape) def 
test_raises_error_on_invalid_input(self): invalid_input = 'invalid_input' with self.assertRaises(ValueError): image_resizer_builder.build(invalid_input) def _resized_image_given_text_proto(self, image, text_proto): image_resizer_config = image_resizer_pb2.ImageResizer() text_format.Merge(text_proto, image_resizer_config) image_resizer_fn = image_resizer_builder.build(image_resizer_config) image_placeholder = tf.placeholder(tf.uint8, [1, None, None, 3]) resized_image, _ = image_resizer_fn(image_placeholder) with self.test_session() as sess: return sess.run(resized_image, feed_dict={image_placeholder: image}) def test_fixed_shape_resizer_nearest_neighbor_method(self): image_resizer_text_proto = """ fixed_shape_resizer { height: 1 width: 1 resize_method: NEAREST_NEIGHBOR } """ image = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) image = np.expand_dims(image, axis=2) image = np.tile(image, (1, 1, 3)) image = np.expand_dims(image, axis=0) resized_image = self._resized_image_given_text_proto( image, image_resizer_text_proto) vals = np.unique(resized_image).tolist() self.assertEqual(len(vals), 1) self.assertEqual(vals[0], 1) if __name__ == '__main__': tf.test.main()
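The tests above only exercise `image_resizer_builder.build` through small helpers. For orientation, here is a minimal sketch (not part of the test file) of the same build-and-run flow on a single image; the resizer settings and input size are illustrative only.

```python
# Minimal sketch of the flow the tests above exercise: parse an ImageResizer
# text proto, build the resize function, and run it on one image (TF1 graph mode).
import numpy as np
import tensorflow as tf
from google.protobuf import text_format

from object_detection.builders import image_resizer_builder
from object_detection.protos import image_resizer_pb2

resizer_config = image_resizer_pb2.ImageResizer()
text_format.Merge("""
  keep_aspect_ratio_resizer {
    min_dimension: 600
    max_dimension: 1024
  }
""", resizer_config)

resize_fn = image_resizer_builder.build(resizer_config)

image = tf.constant(np.zeros((480, 640, 3), dtype=np.float32))
resized_image, true_shape = resize_fn(image)

with tf.Session() as sess:
    # Shorter side is scaled up to min_dimension, e.g. (600, 800, 3) for this input.
    print(sess.run(resized_image).shape)
```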
TensorFlow/Classification/ConvNets/resnext101-32x4d/training
training
DGXA100_RNxt101-32x4d_TF32_90E
#!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}

if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
    BIND_TO_SOCKET="--bind-to socket"
fi

mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=resnext101-32x4d \
    --mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \
    --batch_size=128 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
    --lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=6.103515625e-05 \
    --data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
    --results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
TensorFlow/Segmentation/UNet_Medical/examples
examples
unet_INFER_BENCHMARK
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script launches U-Net run in FP32 on 1 GPU for inference benchmarking. Usage:
# bash unet_INFER_BENCHMARK.sh <path to dataset> <path to results directory> <batch size>

horovodrun -np 1 python main.py --data_dir $1 --model_dir $2 --batch_size $3 --exec_mode predict --benchmark --warmup_steps 200 --max_steps 600 --xla
TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading
dataloading
transcribe
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # author: Tomasz Grel (tgrel@nvidia.com) import os import argparse from .feature_spec import FeatureSpec from .dataloader import create_input_pipelines from .split_tfrecords_multihot_dataset import SplitTFRecordsDataset from .raw_binary_dataset import TfRawBinaryDataset def parse_args(): p = argparse.ArgumentParser(description="Transcribe from one dataset format to another") p.add_argument('--src_dataset_path', default='synthetic_dataset', type=str, help='Path to the source directory') p.add_argument('--src_dataset_type', default='tf_raw', choices=['tf_raw', 'synthetic', 'binary_multihot', 'tfrecords_multihot', 'nvt', 'split_tfrecords'], help='The type of the source dataset') p.add_argument('--src_feature_spec', default='feature_spec.yaml', type=str, help='Feature spec filename') p.add_argument('--src_batch_size', default=65536, type=int, help='Batch size of the source dataset') p.add_argument('--src_synthetic_dataset_use_feature_spec', action='store_true', help='Use feature spec for the synthetic dataset') p.add_argument('--dst_dataset_path', default='synthetic_dataset', type=str, help='Path to the destination directory') p.add_argument('--dst_prebatch_size', default=65536, type=int, help='Prebatch size for the dst dataset') p.add_argument('--dst_feature_spec', type=str, default='feature_spec.yaml', help='Dst feature spec filename') p.add_argument('--dst_dataset_type', default='split_tfrecords', choices=['tf_raw', 'synthetic', 'binary_multihot', 'tfrecords_multihot', 'nvt', 'split_tfrecords'], help='The type of the source dataset') p.add_argument('--max_batches_train', default=-1, type=int, help='Max number of train batches to transcribe. Passing -1 will transcribe all the data.') p.add_argument('--max_batches_test', default=-1, type=int, help='Max number of test batches to transcribe. 
Passing -1 will transcribe all the data.') p.add_argument('--train_only', action='store_true', default=False, help='Only transcribe the train dataset.') return p.parse_args() def main(): args = parse_args() fspec_path = os.path.join(args.src_dataset_path, args.src_feature_spec) feature_spec = FeatureSpec.from_yaml(fspec_path) table_ids = list(range(len(feature_spec.get_categorical_sizes()))) src_train, src_test = create_input_pipelines(dataset_type=args.src_dataset_type, dataset_path=args.src_dataset_path, train_batch_size=args.src_batch_size, test_batch_size=args.src_batch_size, table_ids=table_ids, feature_spec=args.src_feature_spec, rank=0, world_size=1) os.makedirs(args.dst_dataset_path, exist_ok=True) if args.dst_dataset_type == 'split_tfrecords': SplitTFRecordsDataset.generate(src_train=src_train, src_test=src_test, feature_spec=feature_spec, dst_dir=args.dst_dataset_path, dst_feature_spec=args.dst_feature_spec, prebatch_size=args.dst_prebatch_size, max_batches_train=args.max_batches_train, max_batches_test=args.max_batches_test) elif args.dst_dataset_type == 'tf_raw': TfRawBinaryDataset.generate(src_train=src_train, src_test=src_test, feature_spec=feature_spec, dst_dir=args.dst_dataset_path, dst_feature_spec=args.dst_feature_spec, max_batches_train=args.max_batches_train, max_batches_test=args.max_batches_test) else: raise ValueError(f'Unimplemented dst_dataset_type: {args.dst_dataset_type}') print('Done.') if __name__ == '__main__': main()
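For orientation, the sketch below drives the same transcription programmatically instead of through `parse_args`. It reuses only the calls already visible in `main()` above; the paths, the batch size, and the assumption that the `dataloading` package is importable from the working directory are illustrative.

```python
# Programmatic version of the tf_raw -> split_tfrecords branch of main() above.
# Paths and batch size are placeholders; adjust to your dataset layout.
import os

from dataloading.feature_spec import FeatureSpec
from dataloading.dataloader import create_input_pipelines
from dataloading.split_tfrecords_multihot_dataset import SplitTFRecordsDataset

src_path = '/data/criteo/tf_raw'
dst_path = '/data/criteo/split_tfrecords'
batch_size = 65536

feature_spec = FeatureSpec.from_yaml(os.path.join(src_path, 'feature_spec.yaml'))
table_ids = list(range(len(feature_spec.get_categorical_sizes())))

src_train, src_test = create_input_pipelines(
    dataset_type='tf_raw', dataset_path=src_path,
    train_batch_size=batch_size, test_batch_size=batch_size,
    table_ids=table_ids, feature_spec='feature_spec.yaml',
    rank=0, world_size=1)

os.makedirs(dst_path, exist_ok=True)
SplitTFRecordsDataset.generate(
    src_train=src_train, src_test=src_test, feature_spec=feature_spec,
    dst_dir=dst_path, dst_feature_spec='feature_spec.yaml',
    prebatch_size=batch_size, max_batches_train=-1, max_batches_test=-1)
```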
PyTorch/LanguageModeling/BERT/scripts
scripts
run_squad
#!/usr/bin/env bash # Copyright (c) 2019-2020 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. echo "Container nvidia build = " $NVIDIA_BUILD_ID init_checkpoint=${1:-"/workspace/bert/checkpoints/bert_uncased.pt"} epochs=${2:-"2.0"} batch_size=${3:-"4"} learning_rate=${4:-"3e-5"} warmup_proportion=${5:-"0.1"} precision=${6:-"fp16"} num_gpu=${7:-"8"} seed=${8:-"1"} squad_dir=${9:-"$BERT_PREP_WORKING_DIR/download/squad/v1.1"} vocab_file=${10:-"$BERT_PREP_WORKING_DIR/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt"} OUT_DIR=${11:-"/workspace/bert/results/SQuAD"} mode=${12:-"train eval"} CONFIG_FILE=${13:-"/workspace/bert/bert_configs/large.json"} max_steps=${14:-"-1"} echo "out dir is $OUT_DIR" mkdir -p $OUT_DIR if [ ! -d "$OUT_DIR" ]; then echo "ERROR: non existing $OUT_DIR" exit 1 fi use_fp16="" if [ "$precision" = "fp16" ] ; then echo "fp16 activated!" use_fp16=" --fp16 " fi if [ "$num_gpu" = "1" ] ; then export CUDA_VISIBLE_DEVICES=0 mpi_command="" else unset CUDA_VISIBLE_DEVICES mpi_command=" -m torch.distributed.launch --nproc_per_node=$num_gpu" fi CMD="python $mpi_command run_squad.py " CMD+="--init_checkpoint=$init_checkpoint " if [ "$mode" = "train" ] ; then CMD+="--do_train " CMD+="--train_file=$squad_dir/train-v1.1.json " CMD+="--train_batch_size=$batch_size " elif [ "$mode" = "eval" ] ; then CMD+="--do_predict " CMD+="--predict_file=$squad_dir/dev-v1.1.json " CMD+="--predict_batch_size=$batch_size " CMD+="--eval_script=$squad_dir/evaluate-v1.1.py " CMD+="--do_eval " elif [ "$mode" = "prediction" ] ; then CMD+="--do_predict " CMD+="--predict_file=$squad_dir/dev-v1.1.json " CMD+="--predict_batch_size=$batch_size " else CMD+=" --do_train " CMD+=" --train_file=$squad_dir/train-v1.1.json " CMD+=" --train_batch_size=$batch_size " CMD+="--do_predict " CMD+="--predict_file=$squad_dir/dev-v1.1.json " CMD+="--predict_batch_size=$batch_size " CMD+="--eval_script=$squad_dir/evaluate-v1.1.py " CMD+="--do_eval " fi CMD+=" --do_lower_case " CMD+=" --bert_model=bert-large-uncased " CMD+=" --learning_rate=$learning_rate " CMD+=" --warmup_proportion=$warmup_proportion" CMD+=" --seed=$seed " CMD+=" --num_train_epochs=$epochs " CMD+=" --max_seq_length=384 " CMD+=" --doc_stride=128 " CMD+=" --output_dir=$OUT_DIR " CMD+=" --vocab_file=$vocab_file " CMD+=" --config_file=$CONFIG_FILE " CMD+=" --max_steps=$max_steps " CMD+=" $use_fp16" LOGFILE=$OUT_DIR/logfile.txt echo "$CMD |& tee $LOGFILE" time $CMD |& tee $LOGFILE
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/graph
graph
fitter
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import Optional import numpy as np from scipy.optimize import minimize from syngen.utils.types import NDArray from syngen.generator.graph.utils import get_degree_distribution, move_ndarray_to_host from syngen.utils.utils import infer_operator MAXK = 1000 class RMATFitter(object): def __init__(self, fast=True, random=False): self._loglik = self._fast_loglik if fast else self._original_loglik self.random = random def _get_p_directed_graph(self, dd, verbose=False): num_nodes = dd[:, 1].sum() n_exp2 = int(np.ceil(np.log2(num_nodes))) E = (dd[:, 0] * dd[:, 1]).sum() mx = min(dd[-1, 0], MAXK) logeck = np.zeros(shape=(mx + 1), dtype=np.float64) tmp = 0 for k in range(1, mx + 1): logeck[k] = tmp + np.log(E - k + 1) - np.log(k) tmp = logeck[k] lognci = np.zeros(shape=(n_exp2 + 1), dtype=np.float64) tmp = 0 for i in range(1, n_exp2 + 1): lognci[i] = tmp + np.log(n_exp2 - i + 1) - np.log(i) tmp = lognci[i] x0 = np.array([0.5], dtype=np.float64) self.optimization_steps = [] fun = lambda x: self._loglik(x, E, n_exp2, dd, logeck, lognci, MAXK) res = minimize( fun, x0, method="Nelder-Mead", bounds=[(1e-4, 1.0 - 1e-4)], options={"disp": verbose, "fatol": 1e-4}, ) return res.x[0] def _original_loglik(self, p, E, n_exp, count, logeck, lognci, k_cost_threeshold): if p <= 0.0 or p >= 1.0: return 1e100 q = p a = 0.75 * p b = p - a c = q - a if (a + b + c) >= 1.0: return 1e100 Sx = 0.0 Sx2 = 0.0 Sx3 = 0.0 Sx4 = 0.0 Sy = 0.0 Sxy = 0.0 Sx2y = 0.0 numX = count[-1, 0] totObs = 0.0 prevY = 0.0 for m in range(1, numX + 1): x = np.log(m) if m <= MAXK: current_sum = np.exp( logeck[m] + np.log(p) * (n_exp * m) + np.log(1 - p ** n_exp) * (E - m) ) for i in range(1, n_exp + 1): current_sum = current_sum + np.exp( logeck[m] + lognci[i] + np.log(p) * (m * (n_exp - i)) + np.log(1.0 - p) * (m * i) + np.log(1.0 - p ** (n_exp - i) * (1.0 - p) ** i) * (E - m) ) else: logecm = ( E * np.log(E) - m * np.log(m) - (E - m) * np.log(E - m) ) current_sum = np.exp( logecm + np.log(p) * (n_exp * m) + np.log(1 - p ** n_exp) * (E - m) ) for i in range(1, n_exp + 1): current_sum = current_sum + np.exp( logecm + lognci[i] + np.log(p) * (m * (n_exp - i)) + np.log(1.0 - p) * (m * i) + np.log(1.0 - p ** (n_exp - i) * (1.0 - p) ** i) * (E - m) ) y = np.log(current_sum) y = max(0, y) interpY = y while interpY > 0 and (m == 1 or x > np.log(m - 1)): Sx = Sx + x Sx2 = Sx2 + x * x Sx3 = Sx3 + x * x * x Sx4 = Sx4 + x * x * x * x Sy = Sy + interpY Sxy = Sxy + x * interpY Sx2y = Sx2y + x * x * interpY x = x - (np.log(numX) - np.log(numX - 1)) if prevY <= 0: interpY = 0 else: interpY = interpY - (interpY - prevY) / ( np.log(m) - np.log(m - 1) ) * (np.log(numX) - np.log(numX - 1)) totObs = totObs + 1 prevY = y res = np.linalg.inv( np.array([[totObs, Sx, Sx2], [Sx, Sx2, Sx3], [Sx2, Sx3, Sx4]]) ) @ np.array([Sy, Sxy, Sx2y]) ParabolaA = res[0] ParabolaB = res[1] ParabolaC = res[2] l = np.array([0.0], dtype=np.float64) for m in range(1, 
len(count) + 1): k = np.log(count[m - 1, 1]) expectedLogY = ( ParabolaA + ParabolaB * np.log(count[m - 1, 0]) + ParabolaC * np.log(count[m - 1, 0]) * np.log(count[m - 1, 0]) ) l = l + (k - expectedLogY) * (k - expectedLogY) self.optimization_steps.append((p[0], l[0])) return l def _fast_loglik(self, p, E, n_exp, count, logeck, lognci, k_cost_threeshold): if p <= 0.0 or p >= 1.0: return 1e100 q = p a = 0.75 * p b = p - a c = q - a if (a + b + c) >= 1.0: return 1e100 l = np.array([0.0], dtype=np.float64) for j in range(len(count)): m = count[j, 0] ck = np.log(count[j, 1]) if ck > np.log(k_cost_threeshold): if m <= MAXK: current_sum = np.exp( logeck[m] + np.log(p) * (n_exp * m) + np.log(1 - p ** n_exp) * (E - m) ) for i in range(1, n_exp + 1): current_sum = current_sum + np.exp( logeck[m] + lognci[i] + np.log(p) * (m * (n_exp - i)) + np.log(1 - p) * (m * i) + np.log(1 - p ** (n_exp - i) * (1 - p) ** i) * (E - m) ) else: logecm = ( E * np.log(E) - m * np.log(m) - (E - m) * np.log(E - m) ) current_sum = np.exp( logecm + np.log(p) * (n_exp * m) + np.log(1 - p ** n_exp) * (E - m) ) for i in range(1, n_exp + 1): current_sum = current_sum + np.exp( logecm + lognci[i] + np.log(p) * (m * (n_exp - i)) + np.log(1 - p) * (m * i) + np.log(1 - p ** (n_exp - i) * (1 - p) ** i) * (E - m) ) y = np.log(current_sum) y = max(0, y) l = l + (np.exp(ck) - np.exp(y)) * (np.exp(ck) - np.exp(y)) self.optimization_steps.append((p[0], l[0])) return l def _check_optimization_history(self): optimization_steps = np.array(self.optimization_steps) function_values = np.unique(optimization_steps[:, 1]) if len(function_values) <= 1: warnings.warn( "the optimization function is constant for the RMATFitter(fast=True). " "Please, use RMATFitter(fast=False) instead." ) self.optimization_steps = [] def fit(self, graph: Optional[NDArray] = None, is_directed=True, ): if self.random: return 0.25, 0.25, 0.25, 0.25 operator = infer_operator(graph) degree_values, degree_counts = get_degree_distribution(graph[:, 0], operator=operator) out_dd = operator.stack([degree_values, degree_counts], axis=1) out_dd = move_ndarray_to_host(out_dd) if is_directed: degree_values, degree_counts = get_degree_distribution(graph[:, 1], operator=operator) in_dd = operator.stack([degree_values, degree_counts], axis=1) in_dd = move_ndarray_to_host(in_dd) p = self._get_p_directed_graph(out_dd) self._check_optimization_history() if is_directed: q = self._get_p_directed_graph(in_dd) self._check_optimization_history() else: q = p a = 0.75 * (p + q) / 2 b = p - a c = q - a assert (a + b + c) < 1.0, "Cannot get correct RMat fit!" d = 1.0 - (a + b + c) return a, b, c, d
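As a usage illustration for the class above, the sketch below fits RMAT seed probabilities on a synthetic edge list. The uniform random graph is there only to make the call self-contained (its fitted parameters will land close to 0.25 each); real use passes the graph whose degree distribution you want to match, and the import path assumes the `syngen` package is installed.

```python
# Usage sketch for RMATFitter.fit on a synthetic (num_edges, 2) edge array.
import numpy as np

from syngen.generator.graph.fitter import RMATFitter

rng = np.random.default_rng(0)
edges = rng.integers(0, 500_000, size=(1_000_000, 2))  # (src, dst) node ids

fitter = RMATFitter(fast=True)        # fast=False uses the original log-likelihood
a, b, c, d = fitter.fit(edges, is_directed=True)
print(a, b, c, d)                     # RMAT quadrant probabilities, summing to 1.0
```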
PyTorch/Translation/GNMT/seq2seq
seq2seq
gpu_affinity
import collections import math import os import pathlib import re import pynvml pynvml.nvmlInit() def systemGetDriverVersion(): return pynvml.nvmlSystemGetDriverVersion() def deviceGetCount(): return pynvml.nvmlDeviceGetCount() class device: # assume nvml returns list of 64 bit ints _nvml_affinity_elements = math.ceil(os.cpu_count() / 64) def __init__(self, device_idx): super().__init__() self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx) def getName(self): return pynvml.nvmlDeviceGetName(self.handle) def getCpuAffinity(self): affinity_string = '' for j in pynvml.nvmlDeviceGetCpuAffinity( self.handle, device._nvml_affinity_elements ): # assume nvml returns list of 64 bit ints affinity_string = '{:064b}'.format(j) + affinity_string affinity_list = [int(x) for x in affinity_string] affinity_list.reverse() # so core 0 is in 0th element of list ret = [i for i, e in enumerate(affinity_list) if e != 0] return ret def set_socket_affinity(gpu_id): dev = device(gpu_id) affinity = dev.getCpuAffinity() os.sched_setaffinity(0, affinity) def set_single_affinity(gpu_id): dev = device(gpu_id) affinity = dev.getCpuAffinity() os.sched_setaffinity(0, affinity[:1]) def set_single_unique_affinity(gpu_id, nproc_per_node): devices = [device(i) for i in range(nproc_per_node)] socket_affinities = [dev.getCpuAffinity() for dev in devices] siblings_list = get_thread_siblings_list() siblings_dict = dict(siblings_list) # remove siblings for idx, socket_affinity in enumerate(socket_affinities): socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values())) affinities = [] assigned = [] for socket_affinity in socket_affinities: for core in socket_affinity: if core not in assigned: affinities.append([core]) assigned.append(core) break os.sched_setaffinity(0, affinities[gpu_id]) def set_socket_unique_affinity(gpu_id, nproc_per_node, mode): device_ids = [device(i) for i in range(nproc_per_node)] socket_affinities = [dev.getCpuAffinity() for dev in device_ids] siblings_list = get_thread_siblings_list() siblings_dict = dict(siblings_list) # remove siblings for idx, socket_affinity in enumerate(socket_affinities): socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values())) socket_affinities_to_device_ids = collections.defaultdict(list) for idx, socket_affinity in enumerate(socket_affinities): socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx) for socket_affinity, device_ids in socket_affinities_to_device_ids.items(): devices_per_group = len(device_ids) cores_per_device = len(socket_affinity) // devices_per_group for group_id, device_id in enumerate(device_ids): if device_id == gpu_id: if mode == 'interleaved': affinity = list(socket_affinity[group_id::devices_per_group]) elif mode == 'continuous': affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device]) else: raise RuntimeError('Unknown set_socket_unique_affinity mode') # reintroduce siblings affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict] os.sched_setaffinity(0, affinity) def get_thread_siblings_list(): path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list' thread_siblings_list = [] pattern = re.compile(r'(\d+)\D(\d+)') for fname in pathlib.Path(path[0]).glob(path[1:]): with open(fname) as f: content = f.read().strip() res = pattern.findall(content) if res: pair = tuple(map(int, res[0])) thread_siblings_list.append(pair) return thread_siblings_list def set_affinity(gpu_id, nproc_per_node, mode='socket'): if mode == 'socket': 
set_socket_affinity(gpu_id) elif mode == 'single': set_single_affinity(gpu_id) elif mode == 'single_unique': set_single_unique_affinity(gpu_id, nproc_per_node) elif mode == 'socket_unique_interleaved': set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved') elif mode == 'socket_unique_continuous': set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous') else: raise RuntimeError('Unknown affinity mode') affinity = os.sched_getaffinity(0) return affinity
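A short sketch of how the module above is typically used at worker start-up in single-node multi-GPU training. The `LOCAL_RANK`/`WORLD_SIZE` environment variables are an assumption about the launcher (for example `torch.distributed.launch --use_env`), not something this module requires.

```python
# Pin the current worker process to CPUs close to its GPU before building the model.
import os

from seq2seq.gpu_affinity import set_affinity  # assumes the GNMT seq2seq package is on PYTHONPATH

local_rank = int(os.environ.get('LOCAL_RANK', 0))
nproc_per_node = int(os.environ.get('WORLD_SIZE', 1))  # single-node assumption

affinity = set_affinity(local_rank, nproc_per_node, mode='socket_unique_continuous')
print(f'rank {local_rank}: bound to CPUs {sorted(affinity)}')
```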
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
variables_helper_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.utils.variables_helper.""" import os import tensorflow as tf from object_detection.utils import variables_helper class FilterVariablesTest(tf.test.TestCase): def _create_variables(self): return [tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights'), tf.Variable(1.0, name='FeatureExtractor/InceptionV3/biases'), tf.Variable(1.0, name='StackProposalGenerator/weights'), tf.Variable(1.0, name='StackProposalGenerator/biases')] def test_return_all_variables_when_empty_regex(self): variables = self._create_variables() out_variables = variables_helper.filter_variables(variables, ['']) self.assertItemsEqual(out_variables, variables) def test_return_variables_which_do_not_match_single_regex(self): variables = self._create_variables() out_variables = variables_helper.filter_variables(variables, ['FeatureExtractor/.*']) self.assertItemsEqual(out_variables, variables[2:]) def test_return_variables_which_do_not_match_any_regex_in_list(self): variables = self._create_variables() out_variables = variables_helper.filter_variables(variables, [ 'FeatureExtractor.*biases', 'StackProposalGenerator.*biases' ]) self.assertItemsEqual(out_variables, [variables[0], variables[2]]) def test_return_variables_matching_empty_regex_list(self): variables = self._create_variables() out_variables = variables_helper.filter_variables( variables, [''], invert=True) self.assertItemsEqual(out_variables, []) def test_return_variables_matching_some_regex_in_list(self): variables = self._create_variables() out_variables = variables_helper.filter_variables( variables, ['FeatureExtractor.*biases', 'StackProposalGenerator.*biases'], invert=True) self.assertItemsEqual(out_variables, [variables[1], variables[3]]) class MultiplyGradientsMatchingRegexTest(tf.test.TestCase): def _create_grads_and_vars(self): return [(tf.constant(1.0), tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')), (tf.constant(2.0), tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')), (tf.constant(3.0), tf.Variable(3.0, name='StackProposalGenerator/weights')), (tf.constant(4.0), tf.Variable(4.0, name='StackProposalGenerator/biases'))] def test_multiply_all_feature_extractor_variables(self): grads_and_vars = self._create_grads_and_vars() regex_list = ['FeatureExtractor/.*'] multiplier = 0.0 grads_and_vars = variables_helper.multiply_gradients_matching_regex( grads_and_vars, regex_list, multiplier) exp_output = [(0.0, 1.0), (0.0, 2.0), (3.0, 3.0), (4.0, 4.0)] init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) output = sess.run(grads_and_vars) self.assertItemsEqual(output, exp_output) def test_multiply_all_bias_variables(self): grads_and_vars = self._create_grads_and_vars() regex_list = ['.*/biases'] multiplier = 0.0 grads_and_vars = variables_helper.multiply_gradients_matching_regex( 
grads_and_vars, regex_list, multiplier) exp_output = [(1.0, 1.0), (0.0, 2.0), (3.0, 3.0), (0.0, 4.0)] init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) output = sess.run(grads_and_vars) self.assertItemsEqual(output, exp_output) class FreezeGradientsMatchingRegexTest(tf.test.TestCase): def _create_grads_and_vars(self): return [(tf.constant(1.0), tf.Variable(1.0, name='FeatureExtractor/InceptionV3/weights')), (tf.constant(2.0), tf.Variable(2.0, name='FeatureExtractor/InceptionV3/biases')), (tf.constant(3.0), tf.Variable(3.0, name='StackProposalGenerator/weights')), (tf.constant(4.0), tf.Variable(4.0, name='StackProposalGenerator/biases'))] def test_freeze_all_feature_extractor_variables(self): grads_and_vars = self._create_grads_and_vars() regex_list = ['FeatureExtractor/.*'] grads_and_vars = variables_helper.freeze_gradients_matching_regex( grads_and_vars, regex_list) exp_output = [(3.0, 3.0), (4.0, 4.0)] init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) output = sess.run(grads_and_vars) self.assertItemsEqual(output, exp_output) class GetVariablesAvailableInCheckpointTest(tf.test.TestCase): def test_return_all_variables_from_checkpoint(self): with tf.Graph().as_default(): variables = [ tf.Variable(1.0, name='weights'), tf.Variable(1.0, name='biases') ] checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt') init_op = tf.global_variables_initializer() saver = tf.train.Saver(variables) with self.test_session() as sess: sess.run(init_op) saver.save(sess, checkpoint_path) out_variables = variables_helper.get_variables_available_in_checkpoint( variables, checkpoint_path) self.assertItemsEqual(out_variables, variables) def test_return_variables_available_in_checkpoint(self): checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt') with tf.Graph().as_default(): weight_variable = tf.Variable(1.0, name='weights') global_step = tf.train.get_or_create_global_step() graph1_variables = [ weight_variable, global_step ] init_op = tf.global_variables_initializer() saver = tf.train.Saver(graph1_variables) with self.test_session() as sess: sess.run(init_op) saver.save(sess, checkpoint_path) with tf.Graph().as_default(): graph2_variables = graph1_variables + [tf.Variable(1.0, name='biases')] out_variables = variables_helper.get_variables_available_in_checkpoint( graph2_variables, checkpoint_path, include_global_step=False) self.assertItemsEqual(out_variables, [weight_variable]) def test_return_variables_available_an_checkpoint_with_dict_inputs(self): checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt') with tf.Graph().as_default(): graph1_variables = [ tf.Variable(1.0, name='ckpt_weights'), ] init_op = tf.global_variables_initializer() saver = tf.train.Saver(graph1_variables) with self.test_session() as sess: sess.run(init_op) saver.save(sess, checkpoint_path) with tf.Graph().as_default(): graph2_variables_dict = { 'ckpt_weights': tf.Variable(1.0, name='weights'), 'ckpt_biases': tf.Variable(1.0, name='biases') } out_variables = variables_helper.get_variables_available_in_checkpoint( graph2_variables_dict, checkpoint_path) self.assertTrue(isinstance(out_variables, dict)) self.assertItemsEqual(out_variables.keys(), ['ckpt_weights']) self.assertTrue(out_variables['ckpt_weights'].op.name == 'weights') def test_return_variables_with_correct_sizes(self): checkpoint_path = os.path.join(self.get_temp_dir(), 'model.ckpt') with tf.Graph().as_default(): bias_variable = tf.Variable(3.0, 
name='biases') global_step = tf.train.get_or_create_global_step() graph1_variables = [ tf.Variable([[1.0, 2.0], [3.0, 4.0]], name='weights'), bias_variable, global_step ] init_op = tf.global_variables_initializer() saver = tf.train.Saver(graph1_variables) with self.test_session() as sess: sess.run(init_op) saver.save(sess, checkpoint_path) with tf.Graph().as_default(): graph2_variables = [ tf.Variable([1.0, 2.0], name='weights'), # New variable shape. bias_variable, global_step ] out_variables = variables_helper.get_variables_available_in_checkpoint( graph2_variables, checkpoint_path, include_global_step=True) self.assertItemsEqual(out_variables, [bias_variable, global_step]) if __name__ == '__main__': tf.test.main()
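The helpers exercised above are usually combined when restoring a fine-tuning checkpoint; the following sketch (not part of the test) shows that pattern. The exclusion regex and checkpoint path are placeholders, and the snippet assumes a detection graph has already been built in the default graph.

```python
# Sketch: drop variables we explicitly exclude, keep only those that exist in the
# checkpoint with matching shapes, then restore them on top of fresh initialization.
import tensorflow as tf

from object_detection.utils import variables_helper

checkpoint_path = '/path/to/model.ckpt'   # placeholder
exclude_patterns = ['BoxPredictor/.*']    # hypothetical scope to re-initialize from scratch

variables = tf.global_variables()
variables_to_restore = variables_helper.filter_variables(variables, exclude_patterns)
available = variables_helper.get_variables_available_in_checkpoint(
    variables_to_restore, checkpoint_path, include_global_step=False)

saver = tf.train.Saver(available)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # init everything, then overwrite restorable vars
    saver.restore(sess, checkpoint_path)
```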
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling/roi_heads/box_head
box_head
roi_box_predictors
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch import nn


class FastRCNNPredictor(nn.Module):
    def __init__(self, config, pretrained=None):
        super(FastRCNNPredictor, self).__init__()

        stage_index = 4
        stage2_relative_factor = 2 ** (stage_index - 1)
        res2_out_channels = config.MODEL.RESNETS.RES2_OUT_CHANNELS
        num_inputs = res2_out_channels * stage2_relative_factor

        num_classes = config.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        self.avgpool = nn.AvgPool2d(kernel_size=7, stride=7)
        self.cls_score = nn.Linear(num_inputs, num_classes)
        self.bbox_pred = nn.Linear(num_inputs, num_classes * 4)

        nn.init.normal_(self.cls_score.weight, mean=0, std=0.01)
        nn.init.constant_(self.cls_score.bias, 0)

        nn.init.normal_(self.bbox_pred.weight, mean=0, std=0.001)
        nn.init.constant_(self.bbox_pred.bias, 0)

    def forward(self, x):
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        cls_logit = self.cls_score(x)
        bbox_pred = self.bbox_pred(x)
        return cls_logit, bbox_pred


class FPNPredictor(nn.Module):
    def __init__(self, cfg):
        super(FPNPredictor, self).__init__()
        num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        representation_size = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM

        self.cls_score = nn.Linear(representation_size, num_classes)
        self.bbox_pred = nn.Linear(representation_size, num_classes * 4)

        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for l in [self.cls_score, self.bbox_pred]:
            nn.init.constant_(l.bias, 0)

    def forward(self, x):
        scores = self.cls_score(x)
        bbox_deltas = self.bbox_pred(x)

        return scores, bbox_deltas


_ROI_BOX_PREDICTOR = {
    "FastRCNNPredictor": FastRCNNPredictor,
    "FPNPredictor": FPNPredictor,
}


def make_roi_box_predictor(cfg):
    func = _ROI_BOX_PREDICTOR[cfg.MODEL.ROI_BOX_HEAD.PREDICTOR]
    return func(cfg)
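For context, a small sketch of how `make_roi_box_predictor` is wired up, using the default maskrcnn_benchmark config object; the predictor choice and feature shape below are illustrative, not mandated by the module above.

```python
# Build the FPN box predictor from a config and run it on dummy per-RoI features.
import torch

from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.modeling.roi_heads.box_head.roi_box_predictors import make_roi_box_predictor

cfg.MODEL.ROI_BOX_HEAD.PREDICTOR = "FPNPredictor"
predictor = make_roi_box_predictor(cfg)

# FPNPredictor consumes flattened per-RoI features of size MLP_HEAD_DIM.
features = torch.randn(512, cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM)
class_logits, box_regression = predictor(features)
print(class_logits.shape, box_regression.shape)  # (512, NUM_CLASSES), (512, NUM_CLASSES * 4)
```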
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/deployment/convert
convert
torchscript
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. config: type: torchscript
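The config above only selects TorchScript as the conversion backend; the actual export code lives elsewhere in the TSPP deployment pipeline and is not shown in this file. As a rough, generic illustration of what a TorchScript conversion amounts to, here is a self-contained `torch.jit` sketch (it does not reproduce TSPP's implementation).

```python
# Generic TorchScript export sketch -- illustrative only, not TSPP's convert code.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 1)).eval()
example_input = torch.randn(8, 16)

scripted = torch.jit.trace(model, example_input)   # torch.jit.script(model) also works here
scripted.save('model.pt')

reloaded = torch.jit.load('model.pt')
assert torch.allclose(reloaded(example_input), model(example_input))
```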
PyTorch/Recommendation/DLRM
DLRM
README
# DLRM For PyTorch This repository provides a script and recipe to train the Deep Learning Recommendation Model (DLRM) to achieve state-of-the-art accuracy and is tested and maintained by NVIDIA. ## Table Of Contents - [Model overview](#model-overview) * [Model architecture](#model-architecture) * [Default configuration](#default-configuration) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [Enabling TF32](#enabling-tf32) * [Hybrid-parallel multi-GPU with all-2-all communication](#hybrid-parallel-multi-gpu-with-all-2-all-communication) * [Embedding table placement and load balancing](#embedding-table-placement-and-load-balancing) * [Preprocessing on GPU](#preprocessing-on-gpu) * [BYO dataset functionality overview](#byo-dataset-functionality-overview) * [Glossary](#glossary) * [Dataset feature specification](#dataset-feature-specification) * [Data flow in NVIDIA Deep Learning Examples recommendation models](#data-flow-in-nvidia-deep-learning-examples-recommendation-models) * [Example of dataset feature specification](#example-of-dataset-feature-specification) * [BYO dataset functionality](#byo-dataset-functionality) - [Setup](#setup) * [Requirements](#requirements) - [Quick Start Guide](#quick-start-guide) - [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Command-line options](#command-line-options) * [Getting the data](#getting-the-data) * [Dataset guidelines](#dataset-guidelines) * [BYO dataset](#byo-dataset) * [Channel definitions and requirements](#channel-definitions-and-requirements) * [BYO dataset constraints for the model](#BYO-dataset-constraints-for-the-model) * [Preprocessing](#preprocessing) * [NVTabular](#nvtabular) * [Spark](#spark) * [Training process](#training-process) * [Inference process](#inference-process) * [Deploying DLRM Using NVIDIA Triton Inference Server](#deploying-dlrm-using-nvidia-triton-inference-server) - [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb) * [Training accuracy: NVIDIA DGX-1 (8x V100 32GB)](#training-accuracy-nvidia-dgx-1-8x-v100-32gb) * [Training accuracy plots](#training-accuracy-plots) * [Training stability test](#training-stability-test) * [Impact of mixed precision on training accuracy](#impact-of-mixed-precision-on-training-accuracy) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb) * [Training performance: NVIDIA DGX-1 (8x V100 32GB)](#training-performance-nvidia-dgx-1-8x-v100-32gb) * [Training performance: NVIDIA DGX-2 (16x V100 32GB)](#training-performance-nvidia-dgx-2-16x-v100-32gb) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-1x-a100-80gb) * [Inference performance: NVIDIA DGX-1 (1x V100 32GB)](#inference-performance-nvidia-dgx-1-1x-v100-32gb) - [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview The Deep Learning Recommendation Model (DLRM) is a 
recommendation model designed to make use of both categorical and numerical inputs. It was first described in [Deep Learning Recommendation Model for Personalization and Recommendation Systems](https://arxiv.org/abs/1906.00091). This repository provides a reimplementation of the codebase provided originally [here](https://github.com/facebookresearch/dlrm). The scripts provided enable you to train DLRM on the [Criteo Terabyte Dataset](https://labs.criteo.com/2013/12/download-terabyte-click-logs/). Using the scripts provided here, you can efficiently train models that are too large to fit into a single GPU. This is because we use a hybrid-parallel approach, which combines model parallelism for the embedding tables with data parallelism for the Top MLP. This is explained in details in [next sections](#hybrid-parallel-multigpu-with-all-2-all-communication). This model uses a slightly different preprocessing procedure than the one found in the original implementation. You can find a detailed description of the preprocessing steps in the [Dataset guidelines](#dataset-guidelines) section. Using DLRM you can train a high-quality general model for providing recommendations. This model is trained with mixed precision using Tensor Cores on Volta, Turing, and NVIDIA Ampere GPU architectures. Therefore, researchers can get results up to 3.3x faster than training without Tensor Cores while experiencing the benefits of mixed precision training. It is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. ### Model architecture DLRM accepts two types of features: categorical and numerical. For each categorical feature, an embedding table is used to provide dense representation to each unique value. The dense features enter the model and are transformed by a simple neural network referred to as "bottom MLP". This part of the network consists of a series of linear layers with ReLU activations. The output of the bottom MLP and the embedding vectors are then fed into the "dot interaction" operation. The output of "dot interaction" is then concatenated with the features resulting from the bottom MLP and fed into the "top MLP" which is also a series of dense layers with activations. The model outputs a single number which can be interpreted as a likelihood of a certain user clicking an ad. <p align="center"> <img width="100%" src="./notebooks/DLRM_architecture.png" /> <br> Figure 1. The architecture of DLRM. </p> ### Default configuration The following features were implemented in this model: - general - static loss scaling for Tensor Cores (mixed precision) training - hybrid-parallel multi-GPU training - preprocessing - dataset preprocessing using Spark 3 on GPUs - dataset preprocessing using NVTabular on GPUs ### Feature support matrix This model supports the following features: | Feature | DLRM |-----------------------------------------|----- |Automatic mixed precision (AMP) | yes |CUDA Graphs | yes |Hybrid-parallel multi-GPU with all-2-all | yes |Preprocessing on GPU with NVTabular | yes |Preprocessing on GPU with Spark 3 | yes #### Features Automatic Mixed Precision (AMP) - enables mixed precision training without any changes to the code-base by performing automatic graph rewrites and loss scaling controlled by an environmental variable. CUDA Graphs - This feature allows to launch multiple GPU operations through a single CPU operation. The result is a vast reduction in CPU overhead. 
The benefits are particularly pronounced when training with relatively small batch sizes. The CUDA Graphs feature has been available through a [native PyTorch API](https://pytorch.org/docs/master/notes/cuda.html#cuda-graphs) starting from PyTorch v1.10. Multi-GPU training with PyTorch distributed - our model uses `torch.distributed` to implement efficient multi-GPU training with NCCL. For details, see example sources in this repository or see the [PyTorch Tutorial](https://pytorch.org/tutorials/intermediate/dist_tuto.html). Preprocessing on GPU with NVTabular - Criteo dataset preprocessing can be conducted using [NVTabular](https://github.com/NVIDIA/NVTabular). For more information on the framework, see the [Announcing the NVIDIA NVTabular Open Beta with Multi-GPU Support and New Data Loaders](https://developer.nvidia.com/blog/announcing-the-nvtabular-open-beta-with-multi-gpu-support-and-new-data-loaders/). Preprocessing on GPU with Spark 3 - Criteo dataset preprocessing can be conducted using [Apache Spark 3.0](https://spark.apache.org/). For more information on the framework and how to leverage GPU to preprocessing, see the [Accelerating Apache Spark 3.0 with GPUs and RAPIDS](https://developer.nvidia.com/blog/accelerating-apache-spark-3-0-with-gpus-and-rapids/). ### Mixed precision training Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in the half-precision floating-point format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision &ndash; up to 3.3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in [CUDA 8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep Learning SDK. For information about: - How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. - APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/). #### Enabling mixed precision Mixed precision training is turned off by default. To turn it on issue the `--amp` flag to the `main.py` script. #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. 
TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models that require a high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ### Hybrid-parallel multi-GPU with all-2-all communication Many recommendation models contain very large embedding tables. As a result, the model is often too large to fit onto a single device. This could be easily solved by training in a model-parallel way, using either the CPU or other GPUs as "memory donors". However, this approach is suboptimal as the "memory donor" devices' compute is not utilized. In this repository, we use the model-parallel approach for the bottom part of the model (Embedding Tables + Bottom MLP) while using a usual data parallel approach for the top part of the model (Dot Interaction + Top MLP). This way we can train models much larger than what would normally fit into a single GPU while at the same time making the training faster by using multiple GPUs. We call this approach hybrid-parallel. The transition from model-parallel to data-parallel in the middle of the neural net needs a specific multi-GPU communication pattern called [all-2-all](https://en.wikipedia.org/wiki/All-to-all_\(parallel_pattern\)) which is available in our [PyTorch 21.04-py3](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch/tags) NGC docker container. In the [original DLRM whitepaper](https://arxiv.org/abs/1906.00091) this has been also referred to as "butterfly shuffle". <p align="center"> <img width="100%" src="./img/hybrid_parallel.png" /> <br> </p> In the example shown in this repository we train models of three sizes: "small" (~15 GB), "large" (~82 GB), and "xlarge" (~142 GB). We use the hybrid-parallel approach for the "large" and "xlarge" models, as they do not fit in a single GPU. #### Embedding table placement and load balancing We use the following heuristic for dividing the work between the GPUs: - The Bottom MLP is placed on GPU-0 and no embedding tables are placed on this device. - The tables are sorted from the largest to the smallest - Set `max_tables_per_gpu := ceil(number_of_embedding_tables / number_of_available_gpus)` - Repeat until all embedding tables have an assigned device: - Out of all the available GPUs find the one with the largest amount of unallocated memory - Place the largest unassigned embedding table on this GPU. Raise an exception if it does not fit. - If the number of embedding tables on this GPU is now equal to `max_tables_per_gpu` remove this GPU from the list of available GPUs so that no more embedding tables will be placed on this GPU. This ensures the all2all communication is well balanced between all devices. ### Preprocessing on GPU Please refer to [the "Preprocessing" section](#preprocessing) for a detailed description of the Apache Spark 3.0 and NVTabular GPU functionality ### BYO dataset functionality overview This section describes how you can train the DeepLearningExamples RecSys models on your own datasets without changing the model or data loader and with similar performance to the one published in each repository. 
This can be achieved thanks to Dataset Feature Specification, which describes how the dataset, data loader and model interact with each other during training, inference and evaluation. Dataset Feature Specification has a consistent format across all recommendation models in NVIDIA’s DeepLearningExamples repository, regardless of dataset file type and the data loader, giving you the flexibility to train RecSys models on your own datasets. - [Glossary](#glossary) - [Dataset Feature Specification](#dataset-feature-specification) - [Data Flow in Recommendation Models in DeepLearning examples](#data-flow-in-nvidia-deep-learning-examples-recommendation-models) - [Example of Dataset Feature Specification](#example-of-dataset-feature-specification) - [BYO dataset functionality](#byo-dataset-functionality) #### Glossary The Dataset Feature Specification consists of three mandatory and one optional section: <b>feature_spec </b> provides a base of features that may be referenced in other sections, along with their metadata. Format: dictionary (feature name) => (metadata name => metadata value)<br> <b>source_spec </b> provides information necessary to extract features from the files that store them. Format: dictionary (mapping name) => (list of chunks)<br> * <i>Mappings</i> are used to represent different versions of the dataset (think: train/validation/test, k-fold splits). A mapping is a list of chunks.<br> * <i>Chunks</i> are subsets of features that are grouped together for saving. For example, some formats may constrain data saved in one file to a single data type. In that case, each data type would correspond to at least one chunk. Another example where this might be used is to reduce file size and enable more parallel loading. Chunk description is a dictionary of three keys:<br> * <i>type</i> provides information about the format in which the data is stored. Not all formats are supported by all models.<br> * <i>features</i> is a list of features that are saved in a given chunk. Order of this list may matter: for some formats, it is crucial for assigning read data to the proper feature.<br> * <i>files</i> is a list of paths to files where the data is saved. For Feature Specification in yaml format, these paths are assumed to be relative to the yaml file’s directory (basename). <u>Order of this list matters:</u> It is assumed that rows 1 to i appear in the first file, rows i+1 to j in the next one, etc. <br> <b>channel_spec</b> determines how features are used. It is a mapping (channel name) => (list of feature names). Channels are model specific magic constants. In general, data within a channel is processed using the same logic. Example channels: model output (labels), categorical ids, numerical inputs, user data, and item data. <b>metadata</b> is a catch-all, wildcard section: If there is some information about the saved dataset that does not fit into the other sections, you can store it here. #### Dataset feature specification Data flow can be described abstractly: Input data consists of a list of rows. Each row has the same number of columns; each column represents a feature. The columns are retrieved from the input files, loaded, aggregated into channels and supplied to the model/training script. FeatureSpec contains metadata to configure this process and can be divided into three parts: * Specification of how data is organized on disk (source_spec). It describes which feature (from feature_spec) is stored in which file and how files are organized on disk. 
* Specification of features (feature_spec). Describes a dictionary of features, where key is feature name and values are features’ characteristics such as dtype and other metadata (for example, cardinalities for categorical features) * Specification of model’s inputs and outputs (channel_spec). Describes a dictionary of model’s inputs where keys specify model channel’s names and values specify lists of features to be loaded into that channel. Model’s channels are groups of data streams to which common model logic is applied, for example categorical/continuous data, user/item ids. Required/available channels depend on the model The FeatureSpec is a common form of description regardless of underlying dataset format, dataset data loader form and model. #### Data flow in NVIDIA Deep Learning Examples recommendation models The typical data flow is as follows: * <b>S.0.</b> Original dataset is downloaded to a specific folder. * <b>S.1.</b> Original dataset is preprocessed into Intermediary Format. For each model, the preprocessing is done differently, using different tools. The Intermediary Format also varies (for example, for DLRM PyTorch, the Intermediary Format is a custom binary one.) * <b>S.2.</b> The Preprocessing Step outputs Intermediary Format with dataset split into training and validation/testing parts along with the Dataset Feature Specification yaml file. Metadata in the preprocessing step is automatically calculated. * <b>S.3.</b> Intermediary Format data together with Dataset Feature Specification are fed into training/evaluation scripts. Data loader reads Intermediary Format and feeds the data into the model according to the description in the Dataset Feature Specification. * <b>S.4.</b> The model is trained and evaluated <p align="center"> <img width="70%" src="./img/df_diagram.png" /> <br> Fig.1. Data flow in Recommender models in NVIDIA Deep Learning Examples repository. Channels of the model are drawn in green</a>. </p> #### Example of dataset feature specification As an example, let’s consider a Dataset Feature Specification for a small CSV dataset for some abstract model. ```yaml feature_spec: user_gender: dtype: torch.int8 cardinality: 3 #M,F,Other user_age: #treated as numeric value dtype: torch.int8 user_id: dtype: torch.int32 cardinality: 2655 item_id: dtype: torch.int32 cardinality: 856 label: dtype: torch.float32 source_spec: train: - type: csv features: - user_gender - user_age files: - train_data_0_0.csv - train_data_0_1.csv - type: csv features: - user_id - item_id - label files: - train_data_1.csv test: - type: csv features: - user_id - item_id - label - user_gender - user_age files: - test_data.csv channel_spec: numeric_inputs: - user_age categorical_user_inputs: - user_gender - user_id categorical_item_inputs: - item_id label_ch: - label ``` The data contains five features: (user_gender, user_age, user_id, item_id, label). Their data types and necessary metadata are described in the feature specification section. In the source mapping section, two mappings are provided: one describes the layout of the training data, the other of the testing data. The layout for training data has been chosen arbitrarily to showcase the flexibility. The train mapping consists of two chunks. The first one contains user_gender and user_age, saved as a CSV, and is further broken down into two files. For specifics of the layout, refer to the following example and consult the glossary. The second chunk contains the remaining columns and is saved in a single file. 
Notice that the order of columns is different in the second chunk - this is alright, as long as the order matches the order in that file (that is, columns in the .csv are also switched) Let’s break down the train source mapping. The table contains example data color-paired to the files containing it. <p align="center"> <img width="70%" src="./img/layout_example.png" /> </p> The channel spec describes how the data will be consumed. Four streams will be produced and available to the script/model. The feature specification does not specify what happens further: names of these streams are only lookup constants defined by the model/script. Based on this example, we can speculate that the model has three input channels: numeric_inputs, categorical_user_inputs, categorical_item_inputs, and one output channel: label. Feature names are internal to the FeatureSpec and can be freely modified. #### BYO dataset functionality In order to train any Recommendation model in NVIDIA Deep Learning Examples one can follow one of three possible ways: * One delivers already preprocessed dataset in the Intermediary Format supported by data loader used by the training script (different models use different data loaders) together with FeatureSpec yaml file describing at least specification of dataset, features and model channels * One uses a transcoding script * One delivers dataset in non-preprocessed form and uses preprocessing scripts that are a part of the model repository. In order to use already existing preprocessing scripts, the format of the dataset needs to match the one of the original datasets. This way, the FeatureSpec file will be generated automatically, but the user will have the same preprocessing as in the original model repository. ## Setup The following section lists the requirements for training DLRM. ### Requirements This repository contains Dockerfile which extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components: - [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) - [PyTorch 21.10-py3](https://ngc.nvidia.com/catalog/containers/nvidia:pytorch/tags) NGC container - Supported GPUs: - [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) - [NVIDIA Turing architecture](https://www.nvidia.com/en-us/design-visualization/technologies/turing-architecture/) - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: - [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) - [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry) - [Running PyTorch](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/running.html#running) For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). ## Quick Start Guide To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of DLRM on the Criteo Terabyte dataset. 
For the specifics concerning training and inference, see the [Advanced](#advanced) section. 1. Clone the repository. ``` git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/PyTorch/Recommendation/DLRM ``` 2. Download the dataset. You can download the data by following the instructions at: http://labs.criteo.com/2013/12/download-terabyte-click-logs/. When you have successfully downloaded it and unpacked it, set the `CRITEO_DATASET_PARENT_DIRECTORY` to its parent directory: ``` CRITEO_DATASET_PARENT_DIRECTORY=/raid/dlrm ``` We recommend to choose the fastest possible file system, otherwise it may lead to an IO bottleneck. 3. Build DLRM Docker containers ```bash docker build -t nvidia_dlrm_pyt . docker build -t nvidia_dlrm_preprocessing -f Dockerfile_preprocessing . --build-arg DGX_VERSION=[DGX-2|DGX-A100] ``` 3. Start an interactive session in the NGC container to run preprocessing. The DLRM PyTorch container can be launched with: ```bash docker run --runtime=nvidia -it --rm --ipc=host -v ${CRITEO_DATASET_PARENT_DIRECTORY}:/data/dlrm nvidia_dlrm_preprocessing bash ``` 4. Preprocess the dataset. Here are a few examples of different preprocessing commands. Out of the box, we support preprocessing on DGX-2 and DGX A100 systems. For the details on how those scripts work and detailed description of dataset types (small FL=15, large FL=3, xlarge FL=2), system requirements, setup instructions for different systems and all the parameters consult the [preprocessing section](#preprocessing). For an explanation of the `FL` parameter, see the [Dataset Guidelines](#dataset-guidelines) and [Preprocessing](#preprocessing) sections. Depending on dataset type (small FL=15, large FL=3, xlarge FL=2) run one of following command: 4.1. Preprocess to small dataset (FL=15) with Spark GPU: ```bash cd /workspace/dlrm/preproc ./prepare_dataset.sh 15 GPU Spark ``` 4.2. Preprocess to large dataset (FL=3) with Spark GPU: ```bash cd /workspace/dlrm/preproc ./prepare_dataset.sh 3 GPU Spark ``` 4.3. Preprocess to xlarge dataset (FL=2) with Spark GPU: ```bash cd /workspace/dlrm/preproc ./prepare_dataset.sh 2 GPU Spark ``` 5. Start training. - First start the docker container (adding `--security-opt seccomp=unconfined` option is needed to take the full advantage of processor affinity in multi-GPU training): ```bash docker run --security-opt seccomp=unconfined --runtime=nvidia -it --rm --ipc=host -v ${PWD}/data:/data nvidia_dlrm_pyt bash ``` - single-GPU: ```bash python -m dlrm.scripts.main --mode train --dataset /data/dlrm/binary_dataset/ --amp --cuda_graphs ``` - multi-GPU for DGX A100: ```bash python -m torch.distributed.launch --no_python --use_env --nproc_per_node 8 \ bash -c './bind.sh --cpu=dgxa100_ccx.sh --mem=dgxa100_ccx.sh python -m dlrm.scripts.main \ --dataset /data/dlrm/binary_dataset/ --seed 0 --epochs 1 --amp --cuda_graphs' ``` - multi-GPU for DGX-1 and DGX-2: ```bash python -m torch.distributed.launch --no_python --use_env --nproc_per_node 8 \ bash -c './bind.sh --cpu=exclusive -- python -m dlrm.scripts.main \ --dataset /data/dlrm/binary_dataset/ --seed 0 --epochs 1 --amp --cuda_graphs' ``` 6. Start validation/evaluation. If you want to run validation or evaluation, you can either: - use the checkpoint obtained from the training commands above, or - download the pretrained checkpoint from NGC. In order to download the checkpoint from NGC, visit ngc.nvidia.com website and browse the available models. 
7. Start validation/evaluation.

If you want to run validation or evaluation, you can either:
- use the checkpoint obtained from the training commands above, or
- download the pretrained checkpoint from NGC.

In order to download the checkpoint from NGC, visit the ngc.nvidia.com website and browse the available models. Download the checkpoint files and unzip them to some path, for example, to `$CRITEO_DATASET_PARENT_DIRECTORY/checkpoints/`. The checkpoint requires around 15GB of disk space.

Commands:
- single-GPU:
```bash
python -m dlrm.scripts.main --mode test --dataset /data/dlrm/binary_dataset/ --load_checkpoint_path $CRITEO_DATASET_PARENT_DIRECTORY/checkpoints/checkpoint
```
- multi-GPU for DGX A100:
```bash
python -m torch.distributed.launch --no_python --use_env --nproc_per_node 8 \
    bash -c './bind.sh --cpu=dgxa100_ccx.sh --mem=dgxa100_ccx.sh python -m dlrm.scripts.main \
    --dataset /data/dlrm/binary_dataset/ --seed 0 --epochs 1 --amp --load_checkpoint_path $CRITEO_DATASET_PARENT_DIRECTORY/checkpoints/checkpoint'
```
- multi-GPU for DGX-1 and DGX-2:
```bash
python -m torch.distributed.launch --no_python --use_env --nproc_per_node 8 \
    bash -c './bind.sh --cpu=exclusive -- python -m dlrm.scripts.main \
    --dataset /data/dlrm/binary_dataset/ --seed 0 --epochs 1 --amp --load_checkpoint_path $CRITEO_DATASET_PARENT_DIRECTORY/checkpoints/checkpoint'
```

## Advanced

The following sections provide greater details of the dataset, running training and inference, and the training results.

### Scripts and sample code

The `dlrm/scripts/main.py` script provides an entry point to most of the functionality. Using different command-line flags allows you to run training, validation, and benchmark both training and inference on real or synthetic data.

Utilities related to loading the data reside in the `data` directory.

### Command-line options

The `dlrm/scripts/main.py` script supports a number of command-line flags. You can get the descriptions of those by running `python -m dlrm.scripts.main --help`.

The following example output is printed when running the model:
```
Epoch:[0/1] [200/128028] eta: 1:28:44 loss: 0.1782 step_time: 0.041657 lr: 0.8794
Epoch:[0/1] [400/128028] eta: 1:25:15 loss: 0.1403 step_time: 0.038504 lr: 1.7544
Epoch:[0/1] [600/128028] eta: 1:23:56 loss: 0.1384 step_time: 0.038422 lr: 2.6294
Epoch:[0/1] [800/128028] eta: 1:23:13 loss: 0.1370 step_time: 0.038421 lr: 3.5044
Epoch:[0/1] [1000/128028] eta: 1:22:45 loss: 0.1362 step_time: 0.038464 lr: 4.3794
Epoch:[0/1] [1200/128028] eta: 1:22:24 loss: 0.1346 step_time: 0.038455 lr: 5.2544
Epoch:[0/1] [1400/128028] eta: 1:22:07 loss: 0.1339 step_time: 0.038459 lr: 6.1294
Epoch:[0/1] [1600/128028] eta: 1:21:52 loss: 0.1320 step_time: 0.038481 lr: 7.0044
Epoch:[0/1] [1800/128028] eta: 1:21:39 loss: 0.1315 step_time: 0.038482 lr: 7.8794
Epoch:[0/1] [2000/128028] eta: 1:21:27 loss: 0.1304 step_time: 0.038466 lr: 8.7544
Epoch:[0/1] [2200/128028] eta: 1:21:15 loss: 0.1305 step_time: 0.038430 lr: 9.6294
```

### Getting the data

This example uses the [Criteo Terabyte Dataset](https://labs.criteo.com/2013/12/download-terabyte-click-logs/). The first 23 days are used as the training set. The last day is split in half. The first part, referred to as "test", is used for validating training results. The second one, referred to as "validation", is unused.
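To make that split concrete, here is a minimal sketch of the mapping described above, assuming the raw files are named `day_0` through `day_23`; it is only an illustration, not the code used by the preprocessing scripts:

```python
# Illustrative only: the day-to-split mapping described above.
days = [f"day_{i}" for i in range(24)]

train_days = days[:23]   # day_0 .. day_22 form the training set
last_day = days[-1]      # day_23 is split in half

def split_in_half(examples):
    """Return the 'test' half (used for validating results) and the unused 'validation' half."""
    mid = len(examples) // 2
    return examples[:mid], examples[mid:]

# In practice day_23 is far too large to read at once; this only shows the mapping.
with open(last_day) as f:
    test_half, validation_half = split_in_half(f.readlines())
```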
#### Dataset guidelines

The preprocessing steps applied to the raw data include:
- Replacing the missing values with `0`
- Replacing the categorical values that occur fewer than `FL` times with a special value (the FL value is called a frequency threshold or a frequency limit)
- Converting the hash values to consecutive integers
- Adding 3 to all the numerical features so that all of them are greater than or equal to 1
- Taking a natural logarithm of all numerical features

#### BYO dataset

This implementation supports using other datasets thanks to BYO dataset functionality.
The BYO dataset functionality allows users to plug in their dataset in a common fashion for all Recommender models
that support this functionality. Using BYO dataset functionality, the user does not have to modify the source code of
the model thanks to the Feature Specification file. For general information on how BYO dataset works, refer to the
[BYO dataset overview section](#byo-dataset-functionality-overview).

There are three ways to plug in the user's dataset:

<details>
<summary><b>1. Provide an unprocessed dataset in a format matching the one used by Criteo 1TB, then use Criteo 1TB's preprocessing. The Feature Specification file is then generated automatically.</b></summary>

The required format of the user's dataset is:

The data should be split into text files. Each line of those text files should contain a single training example.
An example should consist of multiple fields separated by tabulators:
* The first field is the label – 1 for a positive example and 0 for a negative one.
* The next N tokens should contain the numerical features separated by tabs.
* The next M tokens should contain the hashed categorical features separated by tabs.

The correct dataset files, together with the Feature Specification yaml file, will be generated automatically by the preprocessing script.

For an example of using this process, refer to the [Quick Start Guide](#quick-start-guide).

</details>

<details>
<summary><b>2. Provide a CSV containing preprocessed data and a simplified Feature Specification yaml file, then transcode the data with the `transcode.py` script</b></summary>

This option should be used if the user has their own CSV file with a preprocessed dataset they want to train on.

The required format of the user's dataset is:
* CSV files containing the data, already split into train and test sets.
* A Feature Specification yaml file describing the layout of the CSV data.

For an example of a feature specification file, refer to the `tests/transcoding` folder.

The CSV containing the data:
* should be already split into train and test
* should contain no header
* should contain one column per feature, in the order specified by the list of features for that chunk in the source_spec section of the feature specification file
* categorical features should be non-negative integers in the range [0, cardinality-1] if cardinality is specified

The Feature Specification yaml file:
* needs to describe the layout of data in the CSV files
* may contain information about cardinalities. However, if set to `auto`, they will be inferred from the data by the transcoding script.

Refer to `tests/transcoding/small_csv.yaml` for an example of the yaml Feature Specification.

The following example shows how to use this way of plugging in the user's dataset:

Prepare your data and save the path:
```bash
DATASET_PARENT_DIRECTORY=/raid/dlrm
```

Build the DLRM image with:
```bash
docker build -t nvidia_dlrm_pyt .
```

Launch the container with:
```bash
docker run --runtime=nvidia -it --rm --ipc=host -v ${DATASET_PARENT_DIRECTORY}:/data nvidia_dlrm_preprocessing bash
```

If you are just testing the process, you can create synthetic csv data:
```bash
python -m dlrm.scripts.gen_csv --feature_spec_in tests/transcoding/small_csv.yaml
```

Convert the data:
```bash
mkdir /data/converted
python -m dlrm.scripts.transcode --input /data --output /data/converted
```
You may need to tune the `--chunk_size` parameter. Higher values speed up the conversion but require more RAM.

This will convert the data from `/data` and save the output in `/data/converted`.
A feature specification file describing the new data will be automatically generated.

To run the training on 1 GPU:
```bash
python -m dlrm.scripts.main --mode train --dataset /data/converted --amp --cuda_graphs
```

- multi-GPU for DGX A100:
```bash
python -m torch.distributed.launch --no_python --use_env --nproc_per_node 8 \
    bash -c './bind.sh --cpu=dgxa100_ccx.sh --mem=dgxa100_ccx.sh python -m dlrm.scripts.main \
    --dataset /data/converted --seed 0 --epochs 1 --amp --cuda_graphs'
```

- multi-GPU for DGX-1 and DGX-2:
```bash
python -m torch.distributed.launch --no_python --use_env --nproc_per_node 8 \
    bash -c './bind.sh --cpu=exclusive -- python -m dlrm.scripts.main \
    --dataset /data/converted --seed 0 --epochs 1 --amp --cuda_graphs'
```
</details>

<details>
<summary><b>3. Provide a fully preprocessed dataset, saved in split binary files, and a Feature Specification yaml file</b></summary>

This is the option to choose if you want full control over preprocessing and/or want to preprocess data directly to the target format.

Your final output will need to contain a Feature Specification yaml describing the data and file layout.
For an example feature specification file, refer to `tests/feature_specs/criteo_f15.yaml`.

For details, refer to the [BYO dataset overview section](#byo-dataset-functionality-overview).
</details>

##### Channel definitions and requirements

This model defines three channels:
- categorical, accepting an arbitrary number of features
- numerical, accepting an arbitrary number of features
- label, accepting a single feature

The training script expects two mappings:
- train
- test

For performance reasons:
* The only supported dataset type is split binary
* Splitting chunks into multiple files is not supported.
* Each categorical feature has to be provided in a separate chunk
* All numerical features have to be provided in a single chunk
* All numerical features have to appear in the same order in channel_spec and source_spec
* Only integer types are supported for categorical features
* Only float16 is supported for numerical features

##### BYO dataset constraints for the model

The BYO dataset functionality for this model has the following constraints:
1. The performance of the model depends on the dataset size. Generally, the model should scale better for datasets containing more data points. For a smaller dataset, you might experience slower performance than the one reported for Criteo.
2. Using other datasets might require tuning some hyperparameters (for example, learning rate, beta1 and beta2) to reach the desired accuracy.
3. The optimized CUDA interaction kernels for FP16 and TF32 assume that the number of categorical variables is smaller than WARP_SIZE=32 and that the embedding size is <=128.
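To make the split-binary requirements listed above more concrete, the sketch below writes a tiny, hypothetical chunk layout with NumPy: all numerical features in a single float16 chunk, one integer chunk per categorical feature, and a single label chunk. The file names, sizes, and label dtype are illustrative assumptions; the authoritative layout is whatever the accompanying Feature Specification yaml declares.

```python
import os
import numpy as np

os.makedirs("train", exist_ok=True)

n_samples = 1000   # illustrative size only

# All numerical features go together into one float16 chunk.
numerical = np.random.rand(n_samples, 3).astype(np.float16)
numerical.tofile("train/numerical.bin")

# Each categorical feature gets its own integer-typed chunk,
# with values in the range [0, cardinality - 1].
for name, cardinality in [("cat_a", 100), ("cat_b", 50)]:
    np.random.randint(0, cardinality, size=n_samples, dtype=np.int32).tofile(f"train/{name}.bin")

# A single label feature (the dtype here is an assumption).
np.random.randint(0, 2, size=n_samples).astype(np.bool_).tofile("train/label.bin")
```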
#### Preprocessing

The preprocessing scripts provided in this repository support running both on CPU and on GPU, using [NVTabular](https://developer.nvidia.com/blog/announcing-the-nvtabular-open-beta-with-multi-gpu-support-and-new-data-loaders/) (GPU only) and [Apache Spark 3.0](https://www.nvidia.com/en-us/deep-learning-ai/solutions/data-science/apache-spark-3/).

Please note that the preprocessing will require about 4TB of disk storage.

The syntax for the preprocessing script is as follows:
```bash
cd /workspace/dlrm/preproc
./prepare_dataset.sh <frequency_threshold> <GPU|CPU> <NVTabular|Spark>
```

For the Criteo Terabyte dataset, we recommend a frequency threshold of `FL=3` (when using A100 40GB or V100 32GB) or `FL=2` (when using A100 80GB) if you intend to run the hybrid-parallel mode on multiple GPUs. If you want to make the model fit into a single NVIDIA Tesla V100-32GB, you can set `FL=15`.

The first argument is the frequency threshold to apply to the categorical variables. For a frequency threshold `FL`, the categorical values that occur less often than `FL` will be replaced with one special value for each category. Thus, a larger value of `FL` will require smaller embedding tables and will substantially reduce the overall size of the model.

The second argument is the hardware to use (either GPU or CPU).

The third argument is the framework to use (either NVTabular or Spark). When choosing CPU preprocessing, this argument is omitted, as only Apache Spark is supported on CPU.

The preprocessing scripts make use of the following environment variables to configure the data directory paths:
- `download_dir` – this directory should contain the original Criteo Terabyte CSV files
- `spark_output_path` – directory to which the parquet data will be written
- `conversion_intermediate_dir` – directory used for storing intermediate data used to convert from parquet to train-ready format
- `final_output_dir` – directory to store the final results of the preprocessing, which can then be used to train DLRM

Three subdirectories will be created in `final_output_dir`: `train`, `test`, and `validation`, along with one json file &ndash; `model_size.json` &ndash; containing the maximal index of each category.
`train` is the training dataset transformed from day_0 to day_22.
`test` is the test dataset transformed from the first half of day_23.
`validation` is the dataset transformed from the latter half of day_23.

The model is tested on 3 datasets resulting from the Criteo dataset preprocessing: small (frequency threshold = 15), large (frequency threshold = 3) and xlarge (frequency threshold = 2). Each dataset occupies approximately 370GB of disk space. The table below presents the DGX system and GPU count needed to train the model on each particular dataset.
| Dataset | GPU VRAM consumption\* | Model checkpoint size\* | FL setting | DGX A100 40GB, 1GPU | DGX A100 40GB, 8GPU | DGX A100 80GB, 1GPU | DGX A100 80GB, 8GPU | DGX-1** or DGX-2, 1 GPU | DGX-1** or DGX-2, 8GPU | DGX-2, 16GPU | | ------- | ---------------------- | ----------------------- | ---------- | -------------------- | -------------------- | -------------------- | -------------------- | ---------------------- | --------------------- | ------------ | | small (FL=15) | 20.5 | 15.0 | 15 | Yes | Yes | Yes | Yes | Yes | Yes | Yes | | large (FL=3) | 132.3 | 81.9 | 3 | NA | Yes | NA | Yes | NA | Yes | Yes | | xlarge (FL=2) | 198.8 | 141.3 | 2 | NA | NA | NA | Yes | NA | NA | NA | \*with default embedding dimension setting \**DGX-1 V100 32GB ##### NVTabular NVTabular preprocessing is calibrated to run on [DGX A100](https://www.nvidia.com/en-us/data-center/dgx-a100/) and [DGX-2](https://www.nvidia.com/en-us/data-center/dgx-2/) AI systems. However, it should be possible to change the values of `ALL_DS_MEM_FRAC`, `TRAIN_DS_MEM_FRAC`, `TEST_DS_MEM_FRAC`, `VALID_DS_MEM_FRAC` in `preproc/preproc_NVTabular.py`, so that they'll work on also on other hardware platforms such as DGX-1 or a custom one. ##### Spark The script `spark_data_utils.py` is a PySpark application, which is used to preprocess the Criteo Terabyte Dataset. In the Docker image, we have installed Spark 3.0.1, which will start a standalone cluster of Spark. The scripts `run_spark_cpu.sh` and `run_spark_gpu.sh` start Spark, then run several PySpark jobs with `spark_data_utils.py`. Note that the Spark job requires about 3TB disk space used for data shuffling. Spark preprocessing is calibrated to run on [DGX A100](https://www.nvidia.com/en-us/data-center/dgx-a100/) and [DGX-2](https://www.nvidia.com/en-us/data-center/dgx-2/) AI systems. However, it should be possible to change the values in `preproc/DGX-2_config.sh` or `preproc/DGX-A100_config.sh` so that they'll work on also on other hardware platforms such as DGX-1 or a custom one. ### Training process The main training script resides in `dlrm/scripts/main.py`. Once the training is completed, it stores the checkpoint in the path specified by `--save_checkpoint_path` and a JSON training log in `--log_path`. The quality of the predictions generated by the model is measured by the [ROC AUC metric](https://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics). The speed of training and inference is measured by throughput i.e., the number of samples processed per second. We use mixed precision training with static loss scaling for the bottom and top MLPs while embedding tables are stored in FP32 format. ### Inference process This section describes inference with PyTorch in Python. If you're interested in inference using the Triton Inference Server, refer to [triton/README.md](triton/README.md) file. Two modes for inference are currently supported by the `dlrm/scripts/main.py` script: 1. Inference benchmark – this mode will measure and print out throughput and latency numbers for multiple batch sizes. You can activate it by passing the `--mode inference_benchmark` command line flag. The batch sizes to be tested can be set with the `--inference_benchmark_batch_sizes` command-line argument. 2. Test-only – this mode can be used to run a full validation on a checkpoint to measure ROC AUC. You can enable it by passing `--mode test`. 
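For reference, the ROC AUC metric mentioned above can be computed from saved predictions with scikit-learn; the snippet below is a generic illustration with made-up values, not part of the DLRM code base:

```python
import numpy as np
from sklearn.metrics import roc_auc_score

# Made-up click labels and predicted probabilities standing in for model outputs on the test set.
labels = np.array([0, 0, 1, 1, 0, 1])
scores = np.array([0.10, 0.40, 0.35, 0.80, 0.20, 0.70])

auc = roc_auc_score(labels, scores)
print(f"ROC AUC: {auc:.4f}")   # 1.0 = perfect ranking, 0.5 = random guessing
```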
### Deploying DLRM Using NVIDIA Triton Inference Server The NVIDIA Triton Inference Server provides a cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or gRPC endpoint, allowing remote clients to request inferencing for any model being managed by the server. More information on how to perform inference using NVIDIA Triton Inference Server can be found in [triton/README.md](triton/README.md). ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Benchmarking The following section shows how to run benchmarks measuring the model performance in training and inference modes. #### Training performance benchmark To benchmark the training performance on a specific batch size, please follow the instructions in the [Quick Start Guide](#quick-start-guide). You can also add the `--max_steps 1000 --benchmark_warmup_steps 500` if you want to get a reliable throughput measurement without running the entire training. You can create a synthetic dataset by running `python -m dlrm.scripts.prepare_synthetic_dataset --synthetic_dataset_dir /tmp/dlrm_synthetic_data` if you haven't yet downloaded the dataset. #### Inference performance benchmark To benchmark the inference performance on a specific batch size, run: ``` python -m dlrm.scripts.main --mode inference_benchmark --dataset /data ``` You can also create a synthetic dataset by running `python -m dlrm.scripts.prepare_synthetic_dataset --synthetic_dataset_dir /tmp/dlrm_synthetic_data` if you haven't yet downloaded the dataset. ### Results The following sections provide details on how we achieved our performance and accuracy in training and inference. We used three model size variants to show memory scalability in a multi-GPU setup: | Model variant | Frequency threshold | Model size |---:|---|---| |small | 15 | 15 GB | |large | 3 | 82 GB | |xlarge| 2 | 142 GB| #### Training accuracy results ##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB) Our results were obtained by running `dlrm/scripts/main.py` script as described in the Quick Start Guide in the DLRM Docker container using NVIDIA A100 80GB GPUs. | GPUs | Model size | Batch size / GPU | Accuracy (AUC) - TF32 | Accuracy (AUC) - mixed precision | Time to train - TF32] | Time to train - mixed precision | Time to train speedup (TF32 to mixed precision) | |-------:|:-------------|:-------------------|------------------------:|-----------------------------------:|:------------------------|:----------------------------------|--------------------------------------------------:| | 8 | large | 8k | 0.802509 | 0.802528 | 0:06:27 | 0:04:36 | 1.40217 | | 1 | small | 64k | 0.802537 | 0.802521 | 0:24:26 | 0:17:47 | 1.37395 | ##### Training accuracy: NVIDIA DGX-1 (8x V100 32GB) Our results were obtained by running `dlrm/scripts/main.py` script as described in the Quick Start Guide in the DLRM Docker container using NVIDIA V100 32GB GPUs. 
| GPUs | Model size | Batch size / GPU | Accuracy (AUC) - FP32 | Accuracy (AUC) - mixed precision | Time to train - FP32] | Time to train - mixed precision | Time to train speedup (FP32 to mixed precision) | |-------:|:-------------|:-------------------|------------------------:|-----------------------------------:|:------------------------|:----------------------------------|--------------------------------------------------:| | 8 | large | 8k | 0.802568 | 0.802562 | 0:28:24 | 0:11:45 | 2.41702 | | 1 | small | 64k | 0.802784 | 0.802723 | 1:58:10 | 0:38:17 | 3.08663 | ##### Training accuracy plots Models trained with FP32, TF32, and Automatic Mixed Precision (AMP) achieve similar accuracy. The plot represents ROC AUC metric as a function of steps (step is single batch) during training for default precision (FP32 for Volta architecture (DGX-1) and TF32 for Ampere GPU architecture (DGX-A100)), and AMP for all three datasets. All other parameters of training are default. <p align="center"> <img width="100%" src="./img/learning_curve_FL3.svg" /> <br> Figure 1. Training stability for a FL3 dataset: distribution of ROC AUC across different configurations. 'All configurations' refer to the distribution of ROC AUC for cartesian product of architecture, training precision. </a> </p> <p align="center"> <img width="100%" src="./img/learning_curve_FL15.svg" /> <br> Figure 2. Training stability for a FL15 dataset: distribution of ROC AUC across different configurations. 'All configurations' refer to the distribution of ROC AUC for cartesian product of architecture, training precision. </a> </p> ##### Training stability test Training of the model is stable for multiple configurations achieving a standard deviation of 10e-4. The model achieves similar ROC AUC scores for A100 and V100, training precisions. It was trained for one epoch (roughly 4 billion samples, 64014 batches), starting from 10 different initial random seeds for each setup. The training was performed in the pytorch:21.10-py3 NGC container with and without mixed precision enabled. The provided charts and numbers consider single and multi GPU training. After training, the models were evaluated on the test set. The following plots compare distributions of ROC AUC on the test set. <p align="center"> <img width="100%" height="50%" src="./img/training_stability_FL3_21_10.svg" /> <br> Figure 3. Training stability for a FL3 dataset: distribution of ROC AUC across different configurations. 'All configurations' refer to the distribution of ROC AUC for cartesian product of architecture, training precision. </a> </p> <p align="center"> <img width="100%" src="./img/training_stability_FL15_21_10.svg" /> <br> Figure 4. Training stability for a FL15 dataset: distribution of ROC AUC across different configurations. 'All configurations' refer to the distribution of ROC AUC for cartesian product of architecture, training precision. </a> </p> ##### Impact of mixed precision on training accuracy The accuracy of training, measured with ROC AUC on the test set after the final epoch metric was not impacted by enabling mixed precision. The obtained results were statistically similar. The similarity was measured according to the following procedure: The model was trained 10 times for default settings (FP32 or TF32 for Volta and Ampere architecture respectively) and 10 times for AMP. After the last epoch, the accuracy score ROC AUC was calculated on the test set. Distributions for two hardware configurations (A100, V100) for 2 datasets are presented below. 
<p align="center"> <img width="100%" src="./img/amp_impact_fl3_21_10.svg" /> <br> Figure 5. Impact of AMP on ROC AUC distribution for A100 and V100 GPUs for single- and multi-gpu training on a dataset with a frequency threshold of 3. </a> </p> <p align="center"> <img width="100%" src="./img/amp_impact_fl15_21_10.svg" /> <br> Figure 6. Impact of AMP on ROC AUC distribution for A100 and V100 GPUs for single- and multi-gpu training on a dataset with a frequency threshold of 15. </a> </p> Distribution of AUC ROC for single precision training (TF32 for A100, FP32 for Volta) and AMP training were compared in terms of mean, variance and [Kolmogorov–Smirnov test](https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test) to state statistical difference between single precision and AMP results. Refer to the expandable table below. <details> <summary>Full tabular data for AMP influence on AUC ROC</summary> | Hardware | Dataset | GPUs | mean AUC ROC for full precision | std AUC ROC for full precision | mean AUC ROC for AMP | std AUC ROC for AMP | KS test value: statictics, p-value | |:-----------|:----------|:-------|:----------------------------------|:---------------------------------|:-----------------------|:----------------------|:-------------------------------------| | DGX A100 | FL3 | 8 | 0.802681 | 0.000073 | 0.802646 | 0.000063 | ('0.400', '0.418') | | DGX-2 | FL3 | 16 | 0.802614 | 0.000073 | 0.802623 | 0.000122 | ('0.267', '0.787') | Sample size was set to 10 experiments for each training setup. </details> #### Training performance results We used throughput in items processed per second as the performance metric. ##### Training performance: NVIDIA DGX A100 (8x A100 80GB) Our results were obtained by running the following commands: - for single-GPU setup: ``` python -m dlrm.scripts.main --dataset /data --amp --cuda_graphs ``` - for multi-GPU setup: ``` python -m torch.distributed.launch --no_python --use_env --nproc_per_node 8 \ bash -c './bind.sh --cpu=dgxa100_ccx.sh --mem=dgxa100_ccx.sh python -m dlrm.scripts.main \ --dataset /data --amp --cuda_graphs' ``` in the DLRM Docker container on NVIDIA DGX A100 (8x A100 80GB) GPUs. Performance numbers (in records of data per second) were averaged over an entire training epoch. | GPUs | Model size | Batch size / GPU | Throughput - TF32 | Throughput - mixed precision | Throughput speedup (TF32 to mixed precision) | |-------:|:-------------|:-------------------|--------------------:|-------------------------------:|-----------------------------------------------:| | 8 | large | 8k | 11,400,000 | 16,500,000 | 1.447 | | 1 | small | 64k | 2,880,000 | 4,020,000 | 1.396 | To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). ##### Training performance: NVIDIA DGX-1 (8x V100 32GB) Our results were obtained by running the following commands: - for single-GPU: ``` python -m dlrm.scripts.main --mode train --dataset /data --amp --cuda_graphs ``` - for multi-GPU : ``` python -m torch.distributed.launch --no_python --use_env --nproc_per_node 8 \ bash -c './bind.sh --cpu=exclusive -- python -m dlrm.scripts.main \ --dataset /data --amp --cuda_graphs' ``` in the DLRM Docker container on NVIDIA DGX-1 with (8x V100 32GB) GPUs. Performance numbers (in records of data per second) were averaged over an entire training epoch. 
| GPUs | Model size | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 to mixed precision) | |-------:|:-------------|:-------------------|--------------------:|-------------------------------:|-----------------------------------------------:| | 8 | large | 8k | 2,880,000 | 6,920,000 | 2.403 | | 1 | small | 64k | 672,000 | 2,090,000 | 3.110 | To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). ##### Training performance: NVIDIA DGX-2 (16x V100 32GB) Our results were obtained by running the following commands: - for single-GPU: ``` python -m dlrm.scripts.main --dataset /data --amp --cuda_graphs ``` - for multi-GPU: ``` python -m torch.distributed.launch --no_python --use_env --nproc_per_node [8/16] \ bash -c './bind.sh --cpu=exclusive -- python -m dlrm.scripts.main \ --dataset /data --amp --cuda_graphs' ``` in the DLRM Docker container on NVIDIA DGX-2 with (16x V100 32GB) GPUs. Performance numbers (in records of data per second) were averaged over an entire training epoch. | GPUs | Model size | Batch size / GPU | Throughput - FP32 | Throughput - mixed precision | Throughput speedup (FP32 to mixed precision) | |-------:|:-------------|:-------------------|--------------------:|-------------------------------:|-----------------------------------------------:| | 16 | large | 4k | 4,740,000 | 10,800,000 | 2.278 | | 8 | large | 8k | 3,330,000 | 7,930,000 | 2.381 | | 1 | small | 64k | 717,000 | 2,250,000 | 3.138 | To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). #### Inference performance results ##### Inference performance: NVIDIA A100 (1x A100 80GB) Our results were obtained by running the --inference_benchmark mode in the DLRM Docker container on on the NVIDIA A100 (1x A100 80GB) GPU. 
<table> <tbody> <tr><td></td><td colspan="4" style="text-align:center"><b>Mixed Precision</b></td><td colspan="4" style="text-align:center"><b>TF32</b></td></tr> <tr><td></td><td colspan="2" style="text-align:center"><b>CUDA Graphs ON</b></td><td colspan="2" style="text-align:center"><b>CUDA Graphs OFF</b></td><td colspan="2" style="text-align:center"><b>CUDA Graphs ON</b></td><td colspan="2" style="text-align:center"><b>CUDA Graphs OFF</b></td></tr> <tr><td><b>Batch size</b></td><td><b>Throughput Avg</b></td><td><b>Latency Avg</b></td><td><b>Throughput Avg</b></td><td><b>Latency Avg</b></td><td><b>Throughput Avg</b></td><td><b>Latency Avg</b></td><td><b>Throughput Avg</b></td><td><b>Latency Avg</b></td></tr> <tr><td style="text-align: right;">32768</td><td style="text-align: right;">14,796,024</td><td style="text-align: right;">0.00221</td><td style="text-align: right;">14,369,047</td><td style="text-align: right;">0.00228</td><td style="text-align: right;">8,832,225</td><td style="text-align: right;">0.00371</td><td style="text-align: right;">8,637,000</td><td style="text-align: right;">0.00379</td></tr> <tr><td style="text-align: right;">16384</td><td style="text-align: right;">14,217,340</td><td style="text-align: right;">0.00115</td><td style="text-align: right;">13,673,623</td><td style="text-align: right;">0.00120</td><td style="text-align: right;">8,540,191</td><td style="text-align: right;">0.00192</td><td style="text-align: right;">8,386,694</td><td style="text-align: right;">0.00195</td></tr> <tr><td style="text-align: right;"> 8192</td><td style="text-align: right;">12,769,583</td><td style="text-align: right;">0.00064</td><td style="text-align: right;">11,336,204</td><td style="text-align: right;">0.00072</td><td style="text-align: right;">7,658,459</td><td style="text-align: right;">0.00107</td><td style="text-align: right;">7,463,740</td><td style="text-align: right;">0.00110</td></tr> <tr><td style="text-align: right;"> 4096</td><td style="text-align: right;">10,556,140</td><td style="text-align: right;">0.00039</td><td style="text-align: right;"> 8,203,285</td><td style="text-align: right;">0.00050</td><td style="text-align: right;">6,777,965</td><td style="text-align: right;">0.00060</td><td style="text-align: right;">6,142,076</td><td style="text-align: right;">0.00067</td></tr> <tr><td style="text-align: right;"> 2048</td><td style="text-align: right;"> 8,415,889</td><td style="text-align: right;">0.00024</td><td style="text-align: right;"> 4,785,479</td><td style="text-align: right;">0.00043</td><td style="text-align: right;">5,214,990</td><td style="text-align: right;">0.00039</td><td style="text-align: right;">4,365,954</td><td style="text-align: right;">0.00047</td></tr> <tr><td style="text-align: right;"> 1024</td><td style="text-align: right;"> 5,045,754</td><td style="text-align: right;">0.00020</td><td style="text-align: right;"> 2,357,953</td><td style="text-align: right;">0.00043</td><td style="text-align: right;">3,854,504</td><td style="text-align: right;">0.00027</td><td style="text-align: right;">2,615,601</td><td style="text-align: right;">0.00039</td></tr> <tr><td style="text-align: right;"> 512</td><td style="text-align: right;"> 3,168,261</td><td style="text-align: right;">0.00016</td><td style="text-align: right;"> 1,190,989</td><td style="text-align: right;">0.00043</td><td style="text-align: right;">2,441,310</td><td style="text-align: right;">0.00021</td><td style="text-align: right;">1,332,944</td><td style="text-align: right;">0.00038</td></tr> 
<tr><td style="text-align: right;"> 256</td><td style="text-align: right;"> 1,711,749</td><td style="text-align: right;">0.00015</td><td style="text-align: right;"> 542,310</td><td style="text-align: right;">0.00047</td><td style="text-align: right;">1,365,320</td><td style="text-align: right;">0.00019</td><td style="text-align: right;"> 592,034</td><td style="text-align: right;">0.00043</td></tr> <tr><td style="text-align: right;"> 128</td><td style="text-align: right;"> 889,777</td><td style="text-align: right;">0.00014</td><td style="text-align: right;"> 274,223</td><td style="text-align: right;">0.00047</td><td style="text-align: right;"> 790,984</td><td style="text-align: right;">0.00016</td><td style="text-align: right;"> 300,908</td><td style="text-align: right;">0.00043</td></tr> <tr><td style="text-align: right;"> 64</td><td style="text-align: right;"> 459,728</td><td style="text-align: right;">0.00014</td><td style="text-align: right;"> 136,180</td><td style="text-align: right;">0.00047</td><td style="text-align: right;"> 416,463</td><td style="text-align: right;">0.00015</td><td style="text-align: right;"> 150,382</td><td style="text-align: right;">0.00043</td></tr> <tr><td style="text-align: right;"> 32</td><td style="text-align: right;"> 222,386</td><td style="text-align: right;">0.00014</td><td style="text-align: right;"> 70,107</td><td style="text-align: right;">0.00046</td><td style="text-align: right;"> 174,163</td><td style="text-align: right;">0.00018</td><td style="text-align: right;"> 75,768</td><td style="text-align: right;">0.00042</td></tr> <tr><td style="text-align: right;"> 16</td><td style="text-align: right;"> 117,386</td><td style="text-align: right;">0.00014</td><td style="text-align: right;"> 34,983</td><td style="text-align: right;">0.00046</td><td style="text-align: right;"> 108,992</td><td style="text-align: right;">0.00015</td><td style="text-align: right;"> 38,369</td><td style="text-align: right;">0.00042</td></tr> <tr><td style="text-align: right;"> 8</td><td style="text-align: right;"> 59,200</td><td style="text-align: right;">0.00014</td><td style="text-align: right;"> 18,852</td><td style="text-align: right;">0.00042</td><td style="text-align: right;"> 55,661</td><td style="text-align: right;">0.00014</td><td style="text-align: right;"> 19,440</td><td style="text-align: right;">0.00041</td></tr> <tr><td style="text-align: right;"> 4</td><td style="text-align: right;"> 29,609</td><td style="text-align: right;">0.00014</td><td style="text-align: right;"> 8,505</td><td style="text-align: right;">0.00047</td><td style="text-align: right;"> 27,957</td><td style="text-align: right;">0.00014</td><td style="text-align: right;"> 10,206</td><td style="text-align: right;">0.00039</td></tr> <tr><td style="text-align: right;"> 2</td><td style="text-align: right;"> 14,066</td><td style="text-align: right;">0.00014</td><td style="text-align: right;"> 4,610</td><td style="text-align: right;">0.00043</td><td style="text-align: right;"> 13,010</td><td style="text-align: right;">0.00015</td><td style="text-align: right;"> 5,229</td><td style="text-align: right;">0.00038</td></tr> </tbody> </table> To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). ##### Inference performance: NVIDIA DGX-1 (1x V100 32GB) Our results were obtained by running the `--inference_benchmark` mode in the DLRM Docker container on NVIDIA DGX-1 with (1x V100 32GB) GPU. 
<table> <tbody> <tr><td></td><td colspan="4" style="text-align:center"><b>Mixed Precision</b></td><td colspan="4" style="text-align:center"><b>FP32</b></td></tr> <tr><td></td><td colspan="2" style="text-align:center"><b>CUDA Graphs ON</b></td><td colspan="2" style="text-align:center"><b>CUDA Graphs OFF</b></td><td colspan="2" style="text-align:center"><b>CUDA Graphs ON</b></td><td colspan="2" style="text-align:center"><b>CUDA Graphs OFF</b></td></tr> <tr><td><b>Batch size</b></td><td><b>Throughput Avg</b></td><td><b>Latency Avg</b></td><td><b>Throughput Avg</b></td><td><b>Latency Avg</b></td><td><b>Throughput Avg</b></td><td><b>Latency Avg</b></td><td><b>Throughput Avg</b></td><td><b>Latency Avg</b></td></tr> <tr><td style="text-align: right;">32768</td><td style="text-align: right;">6,716,240</td><td style="text-align: right;">0.00488</td><td style="text-align: right;">6,792,739</td><td style="text-align: right;">0.00482</td><td style="text-align: right;">1,809,345</td><td style="text-align: right;">0.01811</td><td style="text-align: right;">1,802,851</td><td style="text-align: right;">0.01818</td></tr> <tr><td style="text-align: right;">16384</td><td style="text-align: right;">6,543,544</td><td style="text-align: right;">0.00250</td><td style="text-align: right;">6,520,519</td><td style="text-align: right;">0.00251</td><td style="text-align: right;">1,754,713</td><td style="text-align: right;">0.00934</td><td style="text-align: right;">1,745,214</td><td style="text-align: right;">0.00939</td></tr> <tr><td style="text-align: right;"> 8192</td><td style="text-align: right;">6,215,194</td><td style="text-align: right;">0.00132</td><td style="text-align: right;">6,074,446</td><td style="text-align: right;">0.00135</td><td style="text-align: right;">1,669,188</td><td style="text-align: right;">0.00491</td><td style="text-align: right;">1,656,393</td><td style="text-align: right;">0.00495</td></tr> <tr><td style="text-align: right;"> 4096</td><td style="text-align: right;">5,230,443</td><td style="text-align: right;">0.00078</td><td style="text-align: right;">4,901,451</td><td style="text-align: right;">0.00084</td><td style="text-align: right;">1,586,666</td><td style="text-align: right;">0.00258</td><td style="text-align: right;">1,574,068</td><td style="text-align: right;">0.00260</td></tr> <tr><td style="text-align: right;"> 2048</td><td style="text-align: right;">4,261,124</td><td style="text-align: right;">0.00048</td><td style="text-align: right;">3,523,239</td><td style="text-align: right;">0.00058</td><td style="text-align: right;">1,462,006</td><td style="text-align: right;">0.00140</td><td style="text-align: right;">1,416,985</td><td style="text-align: right;">0.00145</td></tr> <tr><td style="text-align: right;"> 1024</td><td style="text-align: right;">3,306,724</td><td style="text-align: right;">0.00031</td><td style="text-align: right;">2,047,274</td><td style="text-align: right;">0.00050</td><td style="text-align: right;">1,277,860</td><td style="text-align: right;">0.00080</td><td style="text-align: right;">1,161,032</td><td style="text-align: right;">0.00088</td></tr> <tr><td style="text-align: right;"> 512</td><td style="text-align: right;">2,049,382</td><td style="text-align: right;">0.00025</td><td style="text-align: right;">1,005,919</td><td style="text-align: right;">0.00051</td><td style="text-align: right;">1,016,186</td><td style="text-align: right;">0.00050</td><td style="text-align: right;"> 841,732</td><td style="text-align: right;">0.00061</td></tr> <tr><td 
style="text-align: right;"> 256</td><td style="text-align: right;">1,149,997</td><td style="text-align: right;">0.00022</td><td style="text-align: right;"> 511,102</td><td style="text-align: right;">0.00050</td><td style="text-align: right;"> 726,349</td><td style="text-align: right;">0.00035</td><td style="text-align: right;"> 485,162</td><td style="text-align: right;">0.00053</td></tr> <tr><td style="text-align: right;"> 128</td><td style="text-align: right;"> 663,048</td><td style="text-align: right;">0.00019</td><td style="text-align: right;"> 264,015</td><td style="text-align: right;">0.00048</td><td style="text-align: right;"> 493,878</td><td style="text-align: right;">0.00026</td><td style="text-align: right;"> 238,936</td><td style="text-align: right;">0.00054</td></tr> <tr><td style="text-align: right;"> 64</td><td style="text-align: right;"> 359,505</td><td style="text-align: right;">0.00018</td><td style="text-align: right;"> 132,913</td><td style="text-align: right;">0.00048</td><td style="text-align: right;"> 295,273</td><td style="text-align: right;">0.00022</td><td style="text-align: right;"> 124,120</td><td style="text-align: right;">0.00052</td></tr> <tr><td style="text-align: right;"> 32</td><td style="text-align: right;"> 175,465</td><td style="text-align: right;">0.00018</td><td style="text-align: right;"> 64,287</td><td style="text-align: right;">0.00050</td><td style="text-align: right;"> 157,629</td><td style="text-align: right;">0.00020</td><td style="text-align: right;"> 63,919</td><td style="text-align: right;">0.00050</td></tr> <tr><td style="text-align: right;"> 16</td><td style="text-align: right;"> 99,207</td><td style="text-align: right;">0.00016</td><td style="text-align: right;"> 31,062</td><td style="text-align: right;">0.00052</td><td style="text-align: right;"> 83,019</td><td style="text-align: right;">0.00019</td><td style="text-align: right;"> 34,660</td><td style="text-align: right;">0.00046</td></tr> <tr><td style="text-align: right;"> 8</td><td style="text-align: right;"> 52,532</td><td style="text-align: right;">0.00015</td><td style="text-align: right;"> 16,492</td><td style="text-align: right;">0.00049</td><td style="text-align: right;"> 43,289</td><td style="text-align: right;">0.00018</td><td style="text-align: right;"> 17,893</td><td style="text-align: right;">0.00045</td></tr> <tr><td style="text-align: right;"> 4</td><td style="text-align: right;"> 27,626</td><td style="text-align: right;">0.00014</td><td style="text-align: right;"> 8,391</td><td style="text-align: right;">0.00048</td><td style="text-align: right;"> 22,692</td><td style="text-align: right;">0.00018</td><td style="text-align: right;"> 8,923</td><td style="text-align: right;">0.00045</td></tr> <tr><td style="text-align: right;"> 2</td><td style="text-align: right;"> 13,791</td><td style="text-align: right;">0.00015</td><td style="text-align: right;"> 4,146</td><td style="text-align: right;">0.00048</td><td style="text-align: right;"> 11,747</td><td style="text-align: right;">0.00017</td><td style="text-align: right;"> 4,487</td><td style="text-align: right;">0.00045</td></tr> </tbody> </table> To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). 
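As a sanity check on the tables above, the throughput and latency columns are tied by throughput ≈ batch size / average latency; for example, taking the largest batch from the A100 mixed-precision, CUDA-Graphs-on column:

```python
# throughput [samples/s] ~= batch_size / average latency [s]
batch_size = 32768
latency_avg = 0.00221        # seconds (A100, mixed precision, CUDA graphs ON)

throughput = batch_size / latency_avg
print(f"{throughput:,.0f} samples/s")   # ~14.8M, consistent with the reported 14,796,024
```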
## Release notes ### Changelog October 2021 - Added support for CUDA Graphs - Switched to PyTorch native AMP for mixed precision training - Unified the single-GPU and multi-GPU training scripts - Added support for BYO dataset - Updated performance results - Updated container version June 2021 - Updated container version - Updated performance results March 2021 - Added NVTabular as a new preprocessing option - Added a new dataset - xlarge, which uses a frequency threshold of 2 - Introduced a new GPU - A100 80GB, and its performance results - Updated Spark preprocessing - Added Adam as an optional optimizer for embedding and MLPs, for multi-GPU training - Improved README August 2020 - Preprocessing with Spark 3 on GPU - Multiple performance optimizations - Automatic placement and load balancing of embedding tables - Improved README June 2020 - Updated performance tables to include A100 results and multi-GPU setup - Multi-GPU optimizations May 2020 - Performance optimizations April 2020 - Initial release ### Known issues - Adam optimizer performance is not optimized. - For some seeds, the model's loss can become NaN due to aggressive learning rate schedule. - Custom dot interaction kernels for FP16 and TF32 assume that embedding size <= 128 and number of categorical variables < 32. Pass `--interaction_op dot` to use the slower native operation in those cases.
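For context on the last known issue: a dot interaction takes the bottom-MLP output together with the per-feature embedding vectors and computes all pairwise dot products between them. The sketch below is a generic, unoptimized PyTorch illustration of that operation (tensor names and the choice of the lower triangle are assumptions), not the repository's custom kernel:

```python
import torch

def dot_interaction(bottom_mlp_out, embeddings):
    """Generic pairwise dot-product interaction (illustrative, unoptimized).

    bottom_mlp_out: (batch, d) dense-feature representation
    embeddings:     (batch, num_cat, d) one embedding vector per categorical feature
    """
    # Stack the dense vector with the categorical embeddings: (batch, num_cat + 1, d)
    features = torch.cat([bottom_mlp_out.unsqueeze(1), embeddings], dim=1)
    # All pairwise dot products: (batch, num_cat + 1, num_cat + 1)
    pairwise = torch.bmm(features, features.transpose(1, 2))
    # Keep only the strictly lower triangle (unique pairs) and flatten it.
    n = features.shape[1]
    rows, cols = torch.tril_indices(n, n, offset=-1)
    return pairwise[:, rows, cols]   # (batch, n * (n - 1) / 2)

# Tiny example with assumed sizes: batch 4, embedding dim 8, 3 categorical features.
out = dot_interaction(torch.randn(4, 8), torch.randn(4, 3, 8))
print(out.shape)   # torch.Size([4, 6])
```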
PyTorch/SpeechSynthesis/FastPitch/common
common
env
import os
import shutil
from collections import defaultdict


class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self


class DefaultAttrDict(defaultdict):
    def __init__(self, *args, **kwargs):
        super(DefaultAttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self

    def __getattr__(self, item):
        return self[item]


def build_env(config, config_name, path):
    t_path = os.path.join(path, config_name)
    if config != t_path:
        os.makedirs(path, exist_ok=True)
        shutil.copyfile(config, os.path.join(path, config_name))
PyTorch/SpeechRecognition/Jasper/triton/model_repo_configs/fp32/decoder-ts-script
decoder-ts-script
config
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. name: "decoder-ts-script" platform: "pytorch_libtorch" default_model_filename: "model.pt" max_batch_size: 64 input [ { name: "input__0" data_type: TYPE_FP32 dims: [ -1, 29 ] } ] output [ { name: "output__0" data_type: TYPE_INT32 dims: [-1] } ]
PyTorch/Translation/GNMT/scripts/tests
tests
train_full
#!/bin/bash # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. set -e DATASET_DIR='data/wmt16_de_en' REPO_DIR='/workspace/gnmt' REFERENCE_FILE=$REPO_DIR/scripts/tests/reference_training_performance MATH=$1 if [[ ${MATH} != "fp16" && ${MATH} != "fp32" && ${MATH} != "tf32" ]]; then echo "Unsupported option for MATH, use either 'fp16' or 'fp32' or 'tf32'" exit 1 fi PERF_TOLERANCE=0.9 GPU_NAME=`nvidia-smi --query-gpu=gpu_name --format=csv,noheader |uniq` echo 'GPU_NAME:' ${GPU_NAME} GPU_COUNT=`nvidia-smi --query-gpu=gpu_name --format=csv,noheader |wc -l` echo 'GPU_COUNT:' ${GPU_COUNT} if [[ ${GPU_COUNT} -eq 1 || ${GPU_COUNT} -eq 2 || ${GPU_COUNT} -eq 4 || ${GPU_COUNT} -eq 8 ]]; then GLOBAL_BATCH_SIZE=1024 elif [ ${GPU_COUNT} -eq 16 ]; then GLOBAL_BATCH_SIZE=2048 else echo "Unsupported number of GPUs" exit 1 fi REFERENCE_PERF=`grep "${MATH},${GPU_COUNT},${GPU_NAME}" \ ${REFERENCE_FILE} | \cut -f 4 -d ','` if [ -z "${REFERENCE_PERF}" ]; then echo "WARNING: COULD NOT FIND REFERENCE PERFORMANCE FOR EXECUTED CONFIG" TARGET_PERF='' else PERF_THRESHOLD=$(awk 'BEGIN {print ('${REFERENCE_PERF}' * '${PERF_TOLERANCE}')}') TARGET_PERF='--target-perf '${PERF_THRESHOLD} fi cd $REPO_DIR python3 -m torch.distributed.launch --nproc_per_node=${GPU_COUNT} train.py \ --dataset-dir $DATASET_DIR \ --seed 2 \ --epochs 6 \ --target-bleu 23.80 \ --math ${MATH} \ --train-global-batch-size ${GLOBAL_BATCH_SIZE} \ ${TARGET_PERF}
PyTorch/Detection/SSD
SSD
download_dataset
# Get COCO 2017 data sets
COCO_DIR=${1:-"/coco"}
dir=$(pwd)
mkdir $COCO_DIR; cd $COCO_DIR
curl -O http://images.cocodataset.org/zips/train2017.zip; unzip train2017.zip
curl -O http://images.cocodataset.org/zips/val2017.zip; unzip val2017.zip
curl -O http://images.cocodataset.org/annotations/annotations_trainval2017.zip; unzip annotations_trainval2017.zip
cd $dir
TensorFlow2/LanguageModeling/ELECTRA/scripts/configs
configs
squad_config
#!/usr/bin/env bash # Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. dgxa100_8gpu_amp () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="32" infer_batch_size="512" learning_rate="8e-4" precision="amp" num_gpu="8" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } dgxa100_8gpu_tf32 () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="32" infer_batch_size="512" learning_rate="8e-4" precision="tf32" num_gpu="8" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } # Full SQuAD training configs for NVIDIA DGX-2H (16x NVIDIA V100 32GB GPU) dgx2_16gpu_amp () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="32" infer_batch_size="256" learning_rate="1e-3" precision="amp" num_gpu="16" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } dgx2_16gpu_fp32 () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="32" infer_batch_size="256" learning_rate="1e-3" precision="fp32" num_gpu="16" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } # Full SQuAD training configs for NVIDIA DGX-1 (8x NVIDIA V100 16GB GPU) dgx1_8gpu_amp () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="16" infer_batch_size="256" learning_rate="4e-4" precision="amp" num_gpu="8" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } dgx1_8gpu_fp32 () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="8" infer_batch_size="256" learning_rate="3e-4" precision="fp32" num_gpu="8" seed="1" SQUAD_VERSION="1.1" 
squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } #Squad 2.0 dgx1_8gpu_amp_v2 () { electra_model="google/electra-base-discriminator" epochs="3" batch_size="16" infer_batch_size="256" learning_rate="4e-4" precision="amp" num_gpu="8" seed="1" SQUAD_VERSION="2.0" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } # 1GPU configs dgxa100_1gpu_amp () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="32" infer_batch_size="512" learning_rate="2e-4" precision="amp" num_gpu="1" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } dgxa100_1gpu_tf32 () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="32" infer_batch_size="512" learning_rate="2e-4" precision="tf32" num_gpu="1" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } # Full SQuAD training configs for NVIDIA DGX-2H (16x NVIDIA V100 32GB GPU) dgx2_1gpu_amp () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="32" infer_batch_size="256" learning_rate="2e-4" precision="amp" num_gpu="1" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } dgx2_1gpu_fp32 () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="32" infer_batch_size="256" learning_rate="2e-4" precision="fp32" num_gpu="1" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } # Full SQuAD training configs for NVIDIA DGX-1 (8x NVIDIA V100 16GB GPU) dgx1_1gpu_amp () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="16" infer_batch_size="256" learning_rate="1e-4" precision="amp" num_gpu="1" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint } 
dgx1_1gpu_fp32 () { electra_model="google/electra-base-discriminator" epochs="2" batch_size="8" infer_batch_size="256" learning_rate="1e-4" precision="fp32" num_gpu="1" seed="1" SQUAD_VERSION="1.1" squad_dir="/workspace/electra/data/download/squad/v$SQUAD_VERSION" OUT_DIR="results/" init_checkpoint="checkpoints/electra_base_qa_v2_False_epoch_2_ckpt" echo $electra_model $epochs $batch_size $infer_batch_size $learning_rate \ $precision $num_gpu $seed $SQUAD_VERSION $squad_dir \ $OUT_DIR $init_checkpoint }
TensorFlow/Recommendation/WideAndDeep/trainer
trainer
features
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tensorflow as tf

LABEL_COLUMN = "label"

DISPLAY_ID_COLUMN = 'display_id'

IS_LEAK_COLUMN = 'is_leak'

DISPLAY_ID_AND_IS_LEAK_ENCODED_COLUMN = 'display_ad_and_is_leak'

CATEGORICAL_COLUMNS = [
    'ad_id', 'doc_id', 'doc_event_id', 'ad_advertiser', 'doc_ad_source_id',
    'doc_ad_publisher_id', 'doc_event_publisher_id', 'doc_event_source_id',
    'event_country', 'event_country_state', 'event_geo_location', 'event_hour',
    'event_platform', 'traffic_source']

DOC_CATEGORICAL_MULTIVALUED_COLUMNS = {
    'doc_ad_category_id': ['doc_ad_category_id_1', 'doc_ad_category_id_2',
                           'doc_ad_category_id_3'],
    'doc_ad_topic_id': ['doc_ad_topic_id_1', 'doc_ad_topic_id_2',
                        'doc_ad_topic_id_3'],
    'doc_ad_entity_id': ['doc_ad_entity_id_1', 'doc_ad_entity_id_2',
                         'doc_ad_entity_id_3', 'doc_ad_entity_id_4',
                         'doc_ad_entity_id_5', 'doc_ad_entity_id_6'],
    'doc_event_category_id': ['doc_event_category_id_1', 'doc_event_category_id_2',
                              'doc_event_category_id_3'],
    'doc_event_topic_id': ['doc_event_topic_id_1', 'doc_event_topic_id_2',
                           'doc_event_topic_id_3'],
    'doc_event_entity_id': ['doc_event_entity_id_1', 'doc_event_entity_id_2',
                            'doc_event_entity_id_3', 'doc_event_entity_id_4',
                            'doc_event_entity_id_5', 'doc_event_entity_id_6']
}

BOOL_COLUMNS = [
    'event_weekend', 'user_has_already_viewed_doc']

INT_COLUMNS = [
    'user_views', 'ad_views', 'doc_views', 'doc_event_days_since_published',
    'doc_event_hour', 'doc_ad_days_since_published']

FLOAT_COLUMNS_LOG_BIN_TRANSFORM = [
    'pop_ad_id', 'pop_ad_id_conf_multipl',
    'pop_document_id', 'pop_document_id_conf_multipl',
    'pop_publisher_id', 'pop_publisher_id_conf_multipl',
    'pop_advertiser_id', 'pop_advertiser_id_conf_multipl',
    'pop_campain_id', 'pop_campain_id_conf_multipl',
    'pop_doc_event_doc_ad', 'pop_doc_event_doc_ad_conf_multipl',
    'pop_source_id', 'pop_source_id_conf_multipl',
    'pop_source_id_country', 'pop_source_id_country_conf_multipl',
    'pop_entity_id', 'pop_entity_id_conf_multipl',
    'pop_entity_id_country', 'pop_entity_id_country_conf_multipl',
    'pop_topic_id', 'pop_topic_id_conf_multipl',
    'pop_topic_id_country', 'pop_topic_id_country_conf_multipl',
    'pop_category_id', 'pop_category_id_conf_multipl',
    'pop_category_id_country', 'pop_category_id_country_conf_multipl',
    'user_doc_ad_sim_categories', 'user_doc_ad_sim_categories_conf_multipl',
    'user_doc_ad_sim_topics', 'user_doc_ad_sim_topics_conf_multipl',
    'user_doc_ad_sim_entities', 'user_doc_ad_sim_entities_conf_multipl',
    'doc_event_doc_ad_sim_categories', 'doc_event_doc_ad_sim_categories_conf_multipl',
    'doc_event_doc_ad_sim_topics', 'doc_event_doc_ad_sim_topics_conf_multipl',
    'doc_event_doc_ad_sim_entities', 'doc_event_doc_ad_sim_entities_conf_multipl']

FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM = [
    'pop_ad_id_conf', 'pop_document_id_conf', 'pop_publisher_id_conf',
    'pop_advertiser_id_conf', 'pop_campain_id_conf', 'pop_doc_event_doc_ad_conf',
    'pop_source_id_conf', 'pop_source_id_country_conf', 'pop_entity_id_conf',
    'pop_entity_id_country_conf', 'pop_topic_id_conf', 'pop_topic_id_country_conf',
    'pop_category_id_conf', 'pop_category_id_country_conf',
    'user_doc_ad_sim_categories_conf', 'user_doc_ad_sim_topics_conf',
    'user_doc_ad_sim_entities_conf', 'doc_event_doc_ad_sim_categories_conf',
    'doc_event_doc_ad_sim_topics_conf', 'doc_event_doc_ad_sim_entities_conf']

FLOAT_COLUMNS = FLOAT_COLUMNS_LOG_BIN_TRANSFORM + FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM

# Let's define the columns we're actually going to use
# during training

REQUEST_SINGLE_HOT_COLUMNS = [
    "doc_event_id", "doc_id", "doc_event_source_id", "event_geo_location",
    "event_country_state", "doc_event_publisher_id", "event_country",
    "event_hour", "event_platform", "traffic_source", "event_weekend",
    "user_has_already_viewed_doc"]

REQUEST_MULTI_HOT_COLUMNS = [
    "doc_event_entity_id", "doc_event_topic_id", "doc_event_category_id"]

REQUEST_NUMERIC_COLUMNS = [
    "pop_document_id_conf", "pop_publisher_id_conf", "pop_source_id_conf",
    "pop_entity_id_conf", "pop_topic_id_conf", "pop_category_id_conf",
    "pop_document_id", "pop_publisher_id", "pop_source_id", "pop_entity_id",
    "pop_topic_id", "pop_category_id", "user_views", "doc_views",
    "doc_event_days_since_published", "doc_event_hour"]

ITEM_SINGLE_HOT_COLUMNS = [
    "ad_id", "doc_ad_source_id", "ad_advertiser", "doc_ad_publisher_id"]

ITEM_MULTI_HOT_COLUMNS = [
    "doc_ad_topic_id", "doc_ad_entity_id", "doc_ad_category_id"]

ITEM_NUMERIC_COLUMNS = [
    "pop_ad_id_conf", "user_doc_ad_sim_categories_conf", "user_doc_ad_sim_topics_conf",
    "pop_advertiser_id_conf", "pop_campain_id_conf_multipl", "pop_ad_id",
    "pop_advertiser_id", "pop_campain_id", "user_doc_ad_sim_categories",
    "user_doc_ad_sim_topics", "user_doc_ad_sim_entities",
    "doc_event_doc_ad_sim_categories", "doc_event_doc_ad_sim_topics",
    "doc_event_doc_ad_sim_entities", "ad_views", "doc_ad_days_since_published"]

NV_TRAINING_COLUMNS = (
    REQUEST_SINGLE_HOT_COLUMNS
    + REQUEST_MULTI_HOT_COLUMNS
    + REQUEST_NUMERIC_COLUMNS
    + ITEM_SINGLE_HOT_COLUMNS
    + ITEM_MULTI_HOT_COLUMNS
    + ITEM_NUMERIC_COLUMNS)

HASH_BUCKET_SIZES = {
    'doc_event_id': 300000,
    'ad_id': 250000,
    'doc_id': 100000,
    'doc_ad_entity_id': 10000,
    'doc_event_entity_id': 10000,
    'doc_ad_source_id': 4000,
    'doc_event_source_id': 4000,
    'event_geo_location': 2500,
    'ad_advertiser': 2500,
    'event_country_state': 2000,
    'doc_ad_publisher_id': 1000,
    'doc_event_publisher_id': 1000,
    'doc_ad_topic_id': 350,
    'doc_event_topic_id': 350,
    'event_country': 300,
    'doc_ad_category_id': 100,
    'doc_event_category_id': 100}

IDENTITY_NUM_BUCKETS = {
    'event_hour': 6,
    'event_platform': 3,
    'traffic_source': 3,
    'event_weekend': 2,
    'user_has_already_viewed_doc': 2}

EMBEDDING_DIMENSIONS = {
    'doc_event_id': 128,
    'ad_id': 128,
    'doc_id': 128,
    'doc_ad_entity_id': 64,
    'doc_event_entity_id': 64,
    'doc_ad_source_id': 64,
    'doc_event_source_id': 64,
    'event_geo_location': 64,
    'ad_advertiser': 64,
    'event_country_state': 64,
    'doc_ad_publisher_id': 64,
    'doc_event_publisher_id': 64,
    'doc_ad_topic_id': 64,
    'doc_event_topic_id': 64,
    'event_country': 64,
    'doc_ad_category_id': 64,
    'doc_event_category_id': 64}


def get_feature_columns(force_subset=None):
    # adding the force_subset as a way to directly pass in column changes for testing/profiling
    deep_columns, wide_columns = [], []

    if force_subset is not None:
        training_columns = force_subset
    else:
        training_columns = NV_TRAINING_COLUMNS

    tf.compat.v1.logging.warn('number of features: {}'.format(len(training_columns)))

    for column_name in training_columns:
        if column_name in HASH_BUCKET_SIZES:
            categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
                column_name,
                hash_bucket_size=HASH_BUCKET_SIZES[column_name],
                dtype=tf.int32)
            wide_columns.append(categorical_column)

        elif column_name in IDENTITY_NUM_BUCKETS:
            categorical_column = tf.feature_column.categorical_column_with_identity(
                column_name, num_buckets=IDENTITY_NUM_BUCKETS[column_name])
            wide_columns.append(categorical_column)

        else:
            columns = []
            if column_name in FLOAT_COLUMNS_SIMPLE_BIN_TRANSFORM:
                # add a categorical_column for column_name + "_binned"
                # just add the regular float column for now
                columns.append(tf.feature_column.numeric_column(
                    column_name, shape=(1,)))
            elif column_name in FLOAT_COLUMNS_LOG_BIN_TRANSFORM:
                # add a categorical_column for column_name + "_log_binned"
                columns.append(tf.feature_column.numeric_column(
                    column_name + "_log_01scaled", shape=(1,)))
            elif column_name in INT_COLUMNS:
                # add a categorical_column for column_name + "_log_int"
                columns.append(tf.feature_column.numeric_column(
                    column_name + "_log_01scaled", shape=(1,)))

            for column in columns:
                wide_columns.append(column)
                deep_columns.append(column)
            continue

        if column_name in EMBEDDING_DIMENSIONS:
            column = tf.feature_column.embedding_column(
                categorical_column,
                dimension=EMBEDDING_DIMENSIONS[column_name],
                combiner='mean')
        else:
            column = tf.feature_column.indicator_column(categorical_column)
        deep_columns.append(column)

    tf.compat.v1.logging.warn('deep columns: {}'.format(len(deep_columns)))
    tf.compat.v1.logging.warn('wide columns: {}'.format(len(wide_columns)))
    tf.compat.v1.logging.warn(
        'wide&deep intersection: {}'.format(len(set(wide_columns).intersection(set(deep_columns)))))

    return wide_columns, deep_columns
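A minimal usage sketch (not part of the repository file above): it assumes the module is importable as features and wires the returned wide/deep column lists into TensorFlow's canned DNNLinearCombinedClassifier; the model directory and hidden-unit sizes are illustrative placeholders.

# Hypothetical example of consuming get_feature_columns(); the module name,
# model_dir and dnn_hidden_units below are assumptions, not taken from the original code.
import tensorflow as tf

from features import get_feature_columns

wide_columns, deep_columns = get_feature_columns()

estimator = tf.estimator.DNNLinearCombinedClassifier(
    model_dir='/tmp/outbrain_wide_and_deep',   # placeholder output directory
    linear_feature_columns=wide_columns,       # wide part: hashed/identity categorical columns
    dnn_feature_columns=deep_columns,          # deep part: embeddings, indicators and numeric columns
    dnn_hidden_units=[1024, 512, 256])         # illustrative layer sizes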
PyTorch/LanguageModeling/BART/configs
configs
config
{
  "_num_labels": 3,
  "activation_dropout": 0.0,
  "activation_function": "gelu",
  "add_final_layer_norm": false,
  "attention_dropout": 0.0,
  "bos_token_id": 0,
  "classif_dropout": 0.0,
  "d_model": 1024,
  "decoder_attention_heads": 16,
  "decoder_ffn_dim": 4096,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 12,
  "decoder_start_token_id": 2,
  "dropout": 0.1,
  "early_stopping": true,
  "encoder_attention_heads": 16,
  "encoder_ffn_dim": 4096,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 12,
  "eos_token_id": 2,
  "force_bos_token_to_be_generated": true,
  "id2label": {
    "0": "LABEL_0",
    "1": "LABEL_1",
    "2": "LABEL_2"
  },
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "label2id": {
    "LABEL_0": 0,
    "LABEL_1": 1,
    "LABEL_2": 2
  },
  "length_penalty": 2.0,
  "max_length": 142,
  "max_position_embeddings": 1024,
  "min_length": 56,
  "model_type": "bart",
  "no_repeat_ngram_size": 3,
  "normalize_before": false,
  "num_beams": 4,
  "num_hidden_layers": 12,
  "output_past": true,
  "pad_token_id": 1,
  "prefix": " ",
  "scale_embedding": false,
  "task_specific_params": {
    "summarization": {
      "early_stopping": true,
      "length_penalty": 2.0,
      "max_length": 142,
      "min_length": 56,
      "no_repeat_ngram_size": 3,
      "num_beams": 4
    }
  },
  "vocab_size": 50265
}
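A hedged sketch of how this configuration could be loaded through the Hugging Face transformers API; saving the JSON as config.json is an assumption made here for illustration.

# Assumes the JSON above has been saved as config.json and transformers is installed.
from transformers import BartConfig

config = BartConfig.from_json_file("config.json")
print(config.d_model, config.encoder_layers, config.num_beams)  # 1024 12 4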
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2
tacotron2
decoderInstance
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "decoderInstance.h"

#include "checkedCopy.h"
#include "cudaUtils.h"
#include "dataShuffler.h"
#include "trtUtils.h"
#include "utils.h"

#include <algorithm>
#include <cuda_runtime.h>
#include <numeric>
#include <stdexcept>

using namespace nvinfer1;

namespace tts
{

/******************************************************************************
 * CONSTRUCTORS / DESTRUCTOR **************************************************
 *****************************************************************************/

DecoderInstance::DecoderInstance(
    TRTPtr<ICudaEngine> engine, const int maxChunkSize) :
    TimedObject("DecoderInstance::infer()"),
    EngineDriver(std::move(engine)),
    mContext(getEngine().createExecutionContext()),
    mMaxChunkSize(maxChunkSize),
    mNextChunkSize(mMaxChunkSize),
    mNumChannels(
        TRTUtils::getBindingSize(getEngine(), OUTPUT_CHANNELS_NAME) - 1),
    mStopThreshold(0.5),
    mBatchSize(-1),
    mSeed(0),
    mLastChunkSize(getMaxBatchSize(), 0),
    mDone(getMaxBatchSize(), true),
    mDropout(
        getMaxBatchSize(),
        maxChunkSize,
        TRTUtils::getBindingSize(getEngine(), INPUT_DROPOUT_NAME),
        0.5,
        0),
    mDecoderInputDevice(
        getMaxBatchSize()
        * TRTUtils::getBindingSize(getEngine(), INPUT_LASTFRAME_NAME)),
    mGateOutputDevice(mMaxChunkSize * getMaxBatchSize()),
    mOutputTransposedDevice(
        getMaxBatchSize() * mMaxChunkSize
        * TRTUtils::getBindingSize(getEngine(), OUTPUT_CHANNELS_NAME)),
    mOutputGateHost(mMaxChunkSize * getMaxBatchSize())
{
    reset(0);
}

/******************************************************************************
 * PUBLIC METHODS *************************************************************
 *****************************************************************************/

void DecoderInstance::infer(cudaStream_t stream, const int batchSize,
    const float* const inputDevice, const float* const inputProcessedDevice,
    const float* const inputMaskDevice, const int32_t* const inputLengthHost,
    const int32_t* const inputLengthDevice, float* const outputDevice)
{
    startTiming();
    if (batchSize > getMaxBatchSize())
    {
        throw std::runtime_error("Maximum batch size is "
            + std::to_string(getMaxBatchSize()) + " but got "
            + std::to_string(batchSize) + ".");
    }
    mBatchSize = batchSize;

    // generate dropout for entire chunk ahead of time
    mDropout.generate(mBatchSize, mNextChunkSize, stream);

    const float* lastFrame = mDecoderInputDevice.data();
    for (int chunk = 0; chunk < mNextChunkSize; ++chunk)
    {
        float* const thisFrame
            = mOutputTransposedDevice.data() + mBatchSize * (mNumChannels + 1) * chunk;

        // convert data to gpu for tensor rt
        decode(
            stream,
            *mContext,
            mBatchSize,
            lastFrame,
            inputDevice,
            inputProcessedDevice,
            inputMaskDevice,
            inputLengthHost,
            inputLengthDevice,
            mDropout.get(chunk),
            thisFrame);

        lastFrame = thisFrame;
    }

    // we need to save the last frame in mDecoderInputDevice for the next run
    CheckedCopy::deviceToDeviceAsync(
        mDecoderInputDevice.data(), lastFrame, mBatchSize * (mNumChannels + 1), stream);

    // transpose my output data from TNC to NCT and pull out gate output
    DataShuffler::parseDecoderOutput(
        mOutputTransposedDevice.data(),
        outputDevice,
        mGateOutputDevice.data(),
        mBatchSize,
        mMaxChunkSize,
        mNumChannels,
        stream);

    CheckedCopy::deviceToHostAsync(
        mOutputGateHost.data(), mGateOutputDevice.data(), mMaxChunkSize * mBatchSize, stream);
    CudaUtils::sync(stream);

    // reduce gate output
    std::fill(mLastChunkSize.begin(), mLastChunkSize.end(), 0);
    for (int batchIndex = 0; batchIndex < mBatchSize; ++batchIndex)
    {
        for (int chunk = 0; chunk < mNextChunkSize; ++chunk)
        {
            if (!mDone[batchIndex])
            {
                ++mLastChunkSize[batchIndex];
                if (Utils::sigmoid(
                        mOutputGateHost.data()[batchIndex * mMaxChunkSize + chunk])
                    > mStopThreshold)
                {
                    mDone[batchIndex] = true;
                }
            }
        }
    }

    stopTiming();
}

const int* DecoderInstance::lastChunkSize() const
{
    return mLastChunkSize.data();
}

bool DecoderInstance::isAllDone() const
{
    // only reduce active batch
    return std::accumulate(
        mDone.cbegin(),
        mDone.cbegin() + mBatchSize,
        true,
        [](const bool a, const bool b) { return a && b; });
}

void DecoderInstance::reset(cudaStream_t stream)
{
    mNextChunkSize = mMaxChunkSize;

    mDropout.reset(mSeed, stream);

    // relies on zeros
    CudaUtils::zeroAsync(
        mDecoderInputDevice.data(), mDecoderInputDevice.size(), stream);

    std::fill(mDone.begin(), mDone.end(), false);
}

void DecoderInstance::setNextChunkSize(const int chunkSize)
{
    if (chunkSize > mMaxChunkSize)
    {
        throw std::runtime_error(
            "Invalid next chunk size: " + std::to_string(chunkSize) + " > "
            + std::to_string(mMaxChunkSize));
    }
    mNextChunkSize = chunkSize;
}

void DecoderInstance::setSeed(const unsigned int seed)
{
    mSeed = seed;
}

int DecoderInstance::getNextChunkSize() const
{
    return mNextChunkSize;
}

int DecoderInstance::getMaxChunkSize() const
{
    return mMaxChunkSize;
}

} // namespace tts
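For readers following the gate-reduction step at the end of DecoderInstance::infer(), a small NumPy sketch of the same logic (free-standing toy code; the function and variable names are chosen here, not taken from the repository): sigmoid(gate) above the 0.5 stop threshold marks a sequence as finished, and the frames produced before that point are counted per batch item.

import numpy as np

def reduce_gate_outputs(gate_logits, done, stop_threshold=0.5):
    # gate_logits: [batch, chunk] raw gate values; done: [batch] bool flags carried between chunks
    batch, chunk = gate_logits.shape
    last_chunk_size = np.zeros(batch, dtype=np.int32)
    gate_prob = 1.0 / (1.0 + np.exp(-gate_logits))  # sigmoid, as Utils::sigmoid does in the C++
    for b in range(batch):
        for t in range(chunk):
            if not done[b]:
                last_chunk_size[b] += 1            # frame produced before the stop gate fired
                if gate_prob[b, t] > stop_threshold:
                    done[b] = True                 # sequence b finished inside this chunk
    return last_chunk_size, done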
TensorFlow/LanguageModeling/BERT/data
data
GLUEDownloader
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys

import wget

from pathlib import Path


def mkdir(path):
    Path(path).mkdir(parents=True, exist_ok=True)


class GLUEDownloader:

    def __init__(self, save_path):
        self.save_path = save_path + '/glue'

    def download(self, task_name):
        mkdir(self.save_path)
        if task_name in {'mrpc', 'mnli'}:
            task_name = task_name.upper()
        elif task_name == 'cola':
            task_name = 'CoLA'
        else:  # SST-2
            assert task_name == 'sst-2'
            task_name = 'SST'
        wget.download(
            'https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/1502038877f6a88c225a34450793fbc3ea87eaba/download_glue_data.py',
            out=self.save_path,
        )
        sys.path.append(self.save_path)
        import download_glue_data
        download_glue_data.main(
            ['--data_dir', self.save_path, '--tasks', task_name])
        sys.path.pop()
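A minimal usage sketch, assuming the class above is saved as GLUEDownloader.py and the target directory is writable; the path and task name are placeholders.

from GLUEDownloader import GLUEDownloader

downloader = GLUEDownloader('/workspace/bert/data/download')  # data lands in <path>/glue
downloader.download('cola')  # accepts 'cola', 'mrpc', 'mnli' or 'sst-2'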
TensorFlow2/Recommendation/DLRM_and_DCNv2/tests/feature_specs
feature_specs
13_num_30_cat
channel_spec:
  categorical:
  - cat_0.bin
  - cat_1.bin
  - cat_2.bin
  - cat_3.bin
  - cat_4.bin
  - cat_5.bin
  - cat_6.bin
  - cat_7.bin
  - cat_8.bin
  - cat_9.bin
  - cat_10.bin
  - cat_11.bin
  - cat_12.bin
  - cat_13.bin
  - cat_14.bin
  - cat_15.bin
  - cat_16.bin
  - cat_17.bin
  - cat_18.bin
  - cat_19.bin
  - cat_20.bin
  - cat_21.bin
  - cat_22.bin
  - cat_23.bin
  - cat_24.bin
  - cat_25.bin
  - cat_26.bin
  - cat_27.bin
  - cat_28.bin
  - cat_29.bin
  label:
  - label
  numerical: &id001
  - num_0
  - num_1
  - num_2
  - num_3
  - num_4
  - num_5
  - num_6
  - num_7
  - num_8
  - num_9
  - num_10
  - num_11
  - num_12
feature_spec:
  cat_0.bin:
    cardinality: 100000
    dtype: int32
  cat_1.bin:
    cardinality: 100001
    dtype: int32
  cat_10.bin:
    cardinality: 100010
    dtype: int32
  cat_11.bin:
    cardinality: 100011
    dtype: int32
  cat_12.bin:
    cardinality: 100012
    dtype: int32
  cat_13.bin:
    cardinality: 100013
    dtype: int32
  cat_14.bin:
    cardinality: 100014
    dtype: int32
  cat_15.bin:
    cardinality: 100015
    dtype: int32
  cat_16.bin:
    cardinality: 100016
    dtype: int32
  cat_17.bin:
    cardinality: 100017
    dtype: int32
  cat_18.bin:
    cardinality: 100018
    dtype: int32
  cat_19.bin:
    cardinality: 100019
    dtype: int32
  cat_2.bin:
    cardinality: 100002
    dtype: int32
  cat_20.bin:
    cardinality: 100020
    dtype: int32
  cat_21.bin:
    cardinality: 100021
    dtype: int32
  cat_22.bin:
    cardinality: 100022
    dtype: int32
  cat_23.bin:
    cardinality: 100023
    dtype: int32
  cat_24.bin:
    cardinality: 100024
    dtype: int32
  cat_25.bin:
    cardinality: 100025
    dtype: int32
  cat_26.bin:
    cardinality: 100026
    dtype: int32
  cat_27.bin:
    cardinality: 100027
    dtype: int32
  cat_28.bin:
    cardinality: 100028
    dtype: int32
  cat_29.bin:
    cardinality: 100029
    dtype: int32
  cat_3.bin:
    cardinality: 100003
    dtype: int32
  cat_4.bin:
    cardinality: 100004
    dtype: int32
  cat_5.bin:
    cardinality: 100005
    dtype: int32
  cat_6.bin:
    cardinality: 100006
    dtype: int32
  cat_7.bin:
    cardinality: 100007
    dtype: int32
  cat_8.bin:
    cardinality: 100008
    dtype: int32
  cat_9.bin:
    cardinality: 100009
    dtype: int32
  label:
    dtype: bool
  num_0:
    dtype: float16
  num_1:
    dtype: float16
  num_10:
    dtype: float16
  num_11:
    dtype: float16
  num_12:
    dtype: float16
  num_2:
    dtype: float16
  num_3:
    dtype: float16
  num_4:
    dtype: float16
  num_5:
    dtype: float16
  num_6:
    dtype: float16
  num_7:
    dtype: float16
  num_8:
    dtype: float16
  num_9:
    dtype: float16
metadata: {}
source_spec:
  test:
  - features: *id001
    files:
    - test/numerical.bin
    type: split_binary
  - features:
    - label
    files:
    - test/label.bin
    type: split_binary
  - features:
    - cat_0.bin
    files:
    - test/cat_0.bin
    type: split_binary
  - features:
    - cat_1.bin
    files:
    - test/cat_1.bin
    type: split_binary
  - features:
    - cat_2.bin
    files:
    - test/cat_2.bin
    type: split_binary
  - features:
    - cat_3.bin
    files:
    - test/cat_3.bin
    type: split_binary
  - features:
    - cat_4.bin
    files:
    - test/cat_4.bin
    type: split_binary
  - features:
    - cat_5.bin
    files:
    - test/cat_5.bin
    type: split_binary
  - features:
    - cat_6.bin
    files:
    - test/cat_6.bin
    type: split_binary
  - features:
    - cat_7.bin
    files:
    - test/cat_7.bin
    type: split_binary
  - features:
    - cat_8.bin
    files:
    - test/cat_8.bin
    type: split_binary
  - features:
    - cat_9.bin
    files:
    - test/cat_9.bin
    type: split_binary
  - features:
    - cat_10.bin
    files:
    - test/cat_10.bin
    type: split_binary
  - features:
    - cat_11.bin
    files:
    - test/cat_11.bin
    type: split_binary
  - features:
    - cat_12.bin
    files:
    - test/cat_12.bin
    type: split_binary
  - features:
    - cat_13.bin
    files:
    - test/cat_13.bin
    type: split_binary
  - features:
    - cat_14.bin
    files:
    - test/cat_14.bin
    type: split_binary
  - features:
    - cat_15.bin
    files:
    - test/cat_15.bin
    type: split_binary
  - features:
    - cat_16.bin
    files:
    - test/cat_16.bin
    type: split_binary
  - features:
    - cat_17.bin
    files:
    - test/cat_17.bin
    type: split_binary
  - features:
    - cat_18.bin
    files:
    - test/cat_18.bin
    type: split_binary
  - features:
    - cat_19.bin
    files:
    - test/cat_19.bin
    type: split_binary
  - features:
    - cat_20.bin
    files:
    - test/cat_20.bin
    type: split_binary
  - features:
    - cat_21.bin
    files:
    - test/cat_21.bin
    type: split_binary
  - features:
    - cat_22.bin
    files:
    - test/cat_22.bin
    type: split_binary
  - features:
    - cat_23.bin
    files:
    - test/cat_23.bin
    type: split_binary
  - features:
    - cat_24.bin
    files:
    - test/cat_24.bin
    type: split_binary
  - features:
    - cat_25.bin
    files:
    - test/cat_25.bin
    type: split_binary
  - features:
    - cat_26.bin
    files:
    - test/cat_26.bin
    type: split_binary
  - features:
    - cat_27.bin
    files:
    - test/cat_27.bin
    type: split_binary
  - features:
    - cat_28.bin
    files:
    - test/cat_28.bin
    type: split_binary
  - features:
    - cat_29.bin
    files:
    - test/cat_29.bin
    type: split_binary
  train:
  - features: *id001
    files:
    - train/numerical.bin
    type: split_binary
  - features:
    - label
    files:
    - train/label.bin
    type: split_binary
  - features:
    - cat_0.bin
    files:
    - train/cat_0.bin
    type: split_binary
  - features:
    - cat_1.bin
    files:
    - train/cat_1.bin
    type: split_binary
  - features:
    - cat_2.bin
    files:
    - train/cat_2.bin
    type: split_binary
  - features:
    - cat_3.bin
    files:
    - train/cat_3.bin
    type: split_binary
  - features:
    - cat_4.bin
    files:
    - train/cat_4.bin
    type: split_binary
  - features:
    - cat_5.bin
    files:
    - train/cat_5.bin
    type: split_binary
  - features:
    - cat_6.bin
    files:
    - train/cat_6.bin
    type: split_binary
  - features:
    - cat_7.bin
    files:
    - train/cat_7.bin
    type: split_binary
  - features:
    - cat_8.bin
    files:
    - train/cat_8.bin
    type: split_binary
  - features:
    - cat_9.bin
    files:
    - train/cat_9.bin
    type: split_binary
  - features:
    - cat_10.bin
    files:
    - train/cat_10.bin
    type: split_binary
  - features:
    - cat_11.bin
    files:
    - train/cat_11.bin
    type: split_binary
  - features:
    - cat_12.bin
    files:
    - train/cat_12.bin
    type: split_binary
  - features:
    - cat_13.bin
    files:
    - train/cat_13.bin
    type: split_binary
  - features:
    - cat_14.bin
    files:
    - train/cat_14.bin
    type: split_binary
  - features:
    - cat_15.bin
    files:
    - train/cat_15.bin
    type: split_binary
  - features:
    - cat_16.bin
    files:
    - train/cat_16.bin
    type: split_binary
  - features:
    - cat_17.bin
    files:
    - train/cat_17.bin
    type: split_binary
  - features:
    - cat_18.bin
    files:
    - train/cat_18.bin
    type: split_binary
  - features:
    - cat_19.bin
    files:
    - train/cat_19.bin
    type: split_binary
  - features:
    - cat_20.bin
    files:
    - train/cat_20.bin
    type: split_binary
  - features:
    - cat_21.bin
    files:
    - train/cat_21.bin
    type: split_binary
  - features:
    - cat_22.bin
    files:
    - train/cat_22.bin
    type: split_binary
  - features:
    - cat_23.bin
    files:
    - train/cat_23.bin
    type: split_binary
  - features:
    - cat_24.bin
    files:
    - train/cat_24.bin
    type: split_binary
  - features:
    - cat_25.bin
    files:
    - train/cat_25.bin
    type: split_binary
  - features:
    - cat_26.bin
    files:
    - train/cat_26.bin
    type: split_binary
  - features:
    - cat_27.bin
    files:
    - train/cat_27.bin
    type: split_binary
  - features:
    - cat_28.bin
    files:
    - train/cat_28.bin
    type: split_binary
  - features:
    - cat_29.bin
    files:
    - train/cat_29.bin
    type: split_binary
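A hedged sketch of reading a spec like the one above with PyYAML and summarizing what it declares; the file name is an assumption.

import yaml

with open('feature_spec.yaml') as f:  # assumed file name
    spec = yaml.safe_load(f)

categorical = spec['channel_spec']['categorical']
cardinalities = [spec['feature_spec'][c]['cardinality'] for c in categorical]
print(len(categorical), 'categorical and',
      len(spec['channel_spec']['numerical']), 'numerical features')
print('total embedding rows:', sum(cardinalities))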