relative_path | section | filename | text
---|---|---|---|
PyTorch/SpeechSynthesis/FastPitch | FastPitch | export_torchscript | # *****************************************************************************
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import torch
from inference import load_and_setup_model
def parse_args(parser):
parser.add_argument('--generator-name', type=str, required=True,
choices=('Tacotron2', 'FastPitch'), help='model name')
parser.add_argument('--generator-checkpoint', type=str, required=True,
help='full path to the generator checkpoint file')
parser.add_argument('-o', '--output', type=str, default="trtis_repo/tacotron/1/model.pt",
help='filename for the Tacotron 2 TorchScript model')
parser.add_argument('--amp', action='store_true',
help='inference with AMP')
return parser
def main():
parser = argparse.ArgumentParser(description='Export models to TorchScript')
parser = parse_args(parser)
args = parser.parse_args()
model = load_and_setup_model(
args.generator_name, parser, args.generator_checkpoint,
args.amp, device='cpu', forward_is_infer=True, polyak=False,
jitable=True)
torch.jit.save(torch.jit.script(model), args.output)
if __name__ == '__main__':
main()
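# A minimal usage sketch (illustrative; the checkpoint and output paths are placeholders,
# not taken from the repository):
#   python export_torchscript.py --generator-name FastPitch \
#       --generator-checkpoint output/FastPitch_checkpoint.pt \
#       -o trtis_repo/fastpitch/1/model.pt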
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | jsonModelImporter | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "jsonModelImporter.h"
#include <fstream>
namespace tts
{
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
std::string mkString(const char c)
{
return std::string(&c, 1);
}
char nextNonWhiteSpace(std::istream& stream)
{
char c;
do
{
stream.get(c);
if (stream.fail())
{
throw std::runtime_error("Failed to read next char at position " + std::to_string(stream.tellg()) + ".");
}
} while (std::isspace(c));
return c;
}
char peekNextNonWhiteSpace(std::istream& stream)
{
char c;
while (true)
{
c = stream.peek();
if (stream.fail())
{
throw std::runtime_error("Failed to peek at next char at position " + std::to_string(stream.tellg()) + ".");
}
if (!std::isspace(c))
{
break;
}
else
{
// move past this white space character
stream.get(c);
if (stream.fail())
{
throw std::runtime_error(
"Failed to read next char at position " + std::to_string(stream.tellg()) + ".");
}
}
}
return c;
}
void expectNextCharacter(std::istream& stream, const char expected)
{
const char c = nextNonWhiteSpace(stream);
if (c != expected)
{
throw std::runtime_error("Failed to find '" + mkString(expected) + "' (found '" + mkString(c)
+ "' instead at position " + std::to_string(stream.tellg()) + ".");
}
}
std::string readName(std::istream& stream)
{
std::string name;
expectNextCharacter(stream, '"');
std::getline(stream, name, '"');
return name;
}
float readNumber(std::istream& stream)
{
float num;
stream >> num;
return num;
}
void readNextArray(std::istream& stream, std::vector<float>& data)
{
const char c = peekNextNonWhiteSpace(stream);
if (c == '[')
{
nextNonWhiteSpace(stream);
// may be another array potentially nested inside
while (true)
{
char c = peekNextNonWhiteSpace(stream);
if (c == '[')
{
// recurse
readNextArray(stream, data);
}
else
{
// read actual array
data.emplace_back(readNumber(stream));
}
// next character should be a ',' or a ']'
c = nextNonWhiteSpace(stream);
if (c == ']')
{
// end of array
break;
}
else if (c != ',')
{
throw std::runtime_error(
"Invalid next character '" + mkString(c) + "' at position " + std::to_string(stream.tellg()) + ".");
}
}
}
else
{
data.emplace_back(readNumber(stream));
}
}
std::vector<float> readTensor(std::istream& stream)
{
std::vector<float> data;
readNextArray(stream, data);
return data;
}
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
JSONModelImporter::JSONModelImporter(const std::string& filename)
: mWeights()
{
std::ifstream fin(filename);
if (!fin.good())
{
throw std::runtime_error("Failed to open '" + filename + "' for reading.");
}
char c;
fin.get(c);
if (c != '{')
{
throw std::runtime_error("First character must be '{', not " + mkString(c));
}
while (true)
{
// loop until we hit an error or the closing '}'
const std::string name = readName(fin);
expectNextCharacter(fin, ':');
std::vector<float> tensor = readTensor(fin);
// all but the last name in the path is the layer name
const size_t layerNameEnd = name.find_last_of(".");
std::string layerName = name.substr(0, layerNameEnd);
const std::string dataName = name.substr(layerNameEnd + 1);
// fix encoder names
for (int i = 0; i < 3; ++i)
{
const std::string oldConvName = "encoder.convolutions." + std::to_string(i) + ".0.conv";
if (layerName == oldConvName)
{
layerName = "encoder.convolutions." + std::to_string(i) + ".conv_layer.conv";
}
const std::string oldBatchName = "encoder.convolutions." + std::to_string(i) + ".1";
if (layerName == oldBatchName)
{
layerName = "encoder.convolutions." + std::to_string(i) + ".batch_norm";
}
}
// fix postnet names
for (int i = 0; i < 5; ++i)
{
const std::string oldConvName = "postnet.convolutions." + std::to_string(i) + ".0.conv";
if (layerName == oldConvName)
{
layerName = "postnet.convolutions." + std::to_string(i) + ".conv_layer.conv";
}
const std::string oldBatchName = "postnet.convolutions." + std::to_string(i) + ".1";
if (layerName == oldBatchName)
{
layerName = "postnet.convolutions." + std::to_string(i) + ".batch_norm";
}
}
auto iter = mWeights.find(layerName);
if (iter == mWeights.end())
{
iter = mWeights.emplace(layerName, std::unique_ptr<LayerData>(new LayerData())).first;
}
iter->second->add(dataName, tensor);
if (peekNextNonWhiteSpace(fin) == '}')
{
break;
}
else
{
expectNextCharacter(fin, ',');
}
}
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
const LayerData* JSONModelImporter::getWeights(const std::vector<std::string>& path)
{
std::string fullPath;
for (size_t i = 0; i < path.size(); ++i)
{
fullPath += path[i];
if (i + 1 < path.size())
{
fullPath += ".";
}
}
auto iter = mWeights.find(fullPath);
if (iter != mWeights.end())
{
return iter->second.get();
}
else
{
throw std::runtime_error("Unable to find '" + fullPath + "'");
}
}
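// A minimal usage sketch (illustrative): the importer expects a JSON object mapping
// "<layer>.<parameter>" names to nested numeric arrays, and getWeights() joins the given
// path components with '.' to look a layer up, e.g.:
//   JSONModelImporter importer("tacotron2.json");
//   const LayerData* conv = importer.getWeights(
//       {"encoder", "convolutions", "0", "conv_layer", "conv"});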
} // namespace tts
|
PyTorch/Detection/Efficientdet/effdet/object_detection | object_detection | matcher | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor 'no_match'.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one wants to treat that anchor
neither as a positive example (match) nor as a negative example (no_match).
The Match class is used to store the match results, and it provides simple APIs
to query the results.
"""
import torch
@torch.jit.script
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results: torch.Tensor):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Raises:
ValueError: if match_results does not have rank 1 or is not an int32 or int64 tensor
"""
if len(match_results.shape) != 1:
raise ValueError('match_results should have rank 1')
if match_results.dtype not in (torch.int32, torch.int64):
raise ValueError('match_results should be an int32 or int64 scalar tensor')
self.match_results = match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return torch.nonzero(self.match_results > -1).flatten().long()
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self.match_results >= 0
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return self.matched_column_indices().numel()
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return torch.nonzero(self.match_results == -1).flatten().long()
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self.match_results == -1
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return self.unmatched_column_indices().numel()
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return torch.nonzero(self.ignored_column_indicator()).flatten().long()
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the column is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column indices.
"""
return self.match_results == -2
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return self.ignored_column_indices().numel()
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return torch.nonzero(0 > self.match_results).flatten().long()
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence with the output of
matched_column_indices(). For example if self.matched_column_indices() is [0, 2],
and self.matched_row_indices() is [7, 3], then we know that column 0 was matched to row 7 and
column 2 was matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return torch.gather(self.match_results, 0, self.matched_column_indices()).flatten().long()
def gather_based_on_match(self, input_tensor, unmatched_value, ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to input_tensor[match_results[col]].
For columns that are unmatched, gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] + input_tensor.shape[1:].
"""
ss = torch.stack([ignored_value, unmatched_value])
input_tensor = torch.cat([ss, input_tensor], dim=0)
gather_indices = torch.clamp(self.match_results + 2, min=0)
gathered_tensor = torch.index_select(input_tensor, 0, gather_indices)
return gathered_tensor
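# A minimal usage sketch (not part of the original module; values are illustrative).
# match_results[i] >= 0 matches column i to that row, -1 marks it unmatched, -2 ignored.
#   m = Match(torch.tensor([1, -1, 0, -2], dtype=torch.int64))
#   m.matched_column_indices()     # tensor([0, 2])
#   m.matched_row_indices()        # tensor([1, 0])
#   m.num_unmatched_columns()      # 1
#   m.gather_based_on_match(torch.tensor([10., 20.]),
#                           torch.tensor(0.), torch.tensor(0.))
#   # -> tensor([20., 0., 10., 0.])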
|
TensorFlow/Segmentation/UNet_3D_Medical/scripts | scripts | unet3d_train_full_TF-AMP | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches 3D-UNet 5-fold cross-validation TF-AMP training, 16000 iterations per fold.
# Usage:
# bash scripts/unet3d_train_full_TF-AMP.sh <number/of/gpus> <path/to/dataset> <path/to/results/directory> <batch/size>
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_dir $3/log.json --exec_mode train_and_evaluate --max_steps 16000 --augment --batch_size $4 --fold 0 --use_xla --use_amp > $3/log_TF-AMP_$1GPU_fold0.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_dir $3/log.json --exec_mode train_and_evaluate --max_steps 16000 --augment --batch_size $4 --fold 1 --use_xla --use_amp > $3/log_TF-AMP_$1GPU_fold1.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_dir $3/log.json --exec_mode train_and_evaluate --max_steps 16000 --augment --batch_size $4 --fold 2 --use_xla --use_amp > $3/log_TF-AMP_$1GPU_fold2.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_dir $3/log.json --exec_mode train_and_evaluate --max_steps 16000 --augment --batch_size $4 --fold 3 --use_xla --use_amp > $3/log_TF-AMP_$1GPU_fold3.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_dir $3/log.json --exec_mode train_and_evaluate --max_steps 16000 --augment --batch_size $4 --fold 4 --use_xla --use_amp > $3/log_TF-AMP_$1GPU_fold4.txt
python runtime/parse_results.py --model_dir $3 --env TF-AMP_$1GPU
|
PyTorch/Forecasting/TFT/triton/runner | runner | config_NVIDIA-DGX-A100-(1x-A100-80GB) | checkpoints:
- name: electricity_bin
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/tft_base_pyt_ckpt_ds-electricity/versions/22.11.0_amp/zip
- name: traffic_bin
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/tft_base_pyt_ckpt_ds-traffic/versions/22.11.0_amp/zip
configurations:
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: electricity_bin
dataset: electricity_bin
device: gpu
export_format: onnx
export_precision: fp32
format: trt
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: traffic_bin
dataset: traffic_bin
device: gpu
export_format: onnx
export_precision: fp32
format: trt
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: electricity_bin
dataset: electricity_bin
device: gpu
export_format: ts-trace
export_precision: fp32
format: ts-trace
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
- accelerator: none
batch_size:
- 1
- 2
- 4
- 8
- 16
- 32
- 64
- 128
- 256
- 512
- 1024
batch_sizes: 1 2 4 8 16 32 64 128 256 512 1024
capture_cuda_graph: 0
checkpoint_variant: traffic_bin
dataset: traffic_bin
device: gpu
export_format: ts-trace
export_precision: fp32
format: ts-trace
max_batch_size: 1024
precision: fp16
request_count: 500
triton_gpu_engine_count: 2
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 512 1024
container_version: '22.11'
datasets:
- name: electricity_bin
- name: traffic_bin
datasets_dir: datasets
framework: PyTorch
model_name: TFT
triton_container_image: null
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
PyTorch/LanguageModeling/BART/scripts/params | params | cnn_dm_params | #!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# CNN-DM Summarization configurations for NVIDIA DGX A100 (8x NVIDIA A100 40GB GPU)
dgxa100_8gpu_bf16 ()
{
DATA_DIR=data/cnn_dm/
CKPT_PATH=data/nvidia_pretrained/bart_large/
CONFIG_PATH="configs/config.json"
NUM_GPU=8
LR=1.25e-4
BS=40
ACCUM=1
PRECISION="bf16"
TRAIN_STEPS=2000
WARMUP_STEPS=50
MAX_SOURCE_LEN=1024
MAX_TARGET_LEN=142
EVAL_BEAMS=4
EVAL_BS=128
PRED_BS=128
PRELN=true
echo $DATA_DIR $CKPT_PATH $CONFIG_PATH $NUM_GPU $LR $BS $ACCUM $PRECISION $TRAIN_STEPS $WARMUP_STEPS $MAX_SOURCE_LEN $MAX_TARGET_LEN $EVAL_BEAMS $EVAL_BS $PRED_BS $PRELN
}
dgxa100_8gpu_bf16_eval ()
{
DATA_DIR=data/cnn_dm/
CONFIG_PATH="configs/config.json"
NUM_GPU=8
PRECISION="bf16"
MAX_SOURCE_LEN=1024
MAX_TARGET_LEN=142
EVAL_BEAMS=4
PRED_BS=128
echo $PRED_BS $NUM_GPU $PRECISION $EVAL_BEAMS $MAX_SOURCE_LEN $MAX_TARGET_LEN $DATA_DIR $CONFIG_PATH
}
dgxa100_8gpu_tf32 ()
{
DATA_DIR=data/cnn_dm/
CKPT_PATH=data/nvidia_pretrained/bart_large/
CONFIG_PATH="configs/config.json"
NUM_GPU=8
LR=1.25e-4
BS=24
ACCUM=1
PRECISION="tf32"
TRAIN_STEPS=3333
WARMUP_STEPS=50
MAX_SOURCE_LEN=1024
MAX_TARGET_LEN=142
EVAL_BEAMS=4
EVAL_BS=128
PRED_BS=64
PRELN=true
echo $DATA_DIR $CKPT_PATH $CONFIG_PATH $NUM_GPU $LR $BS $ACCUM $PRECISION $TRAIN_STEPS $WARMUP_STEPS $MAX_SOURCE_LEN $MAX_TARGET_LEN $EVAL_BEAMS $EVAL_BS $PRED_BS $PRELN
}
dgxa100_8gpu_tf32_eval ()
{
DATA_DIR=data/cnn_dm/
CONFIG_PATH="configs/config.json"
NUM_GPU=8
PRECISION="tf32"
MAX_SOURCE_LEN=1024
MAX_TARGET_LEN=142
EVAL_BEAMS=4
PRED_BS=64
echo $PRED_BS $NUM_GPU $PRECISION $EVAL_BEAMS $MAX_SOURCE_LEN $MAX_TARGET_LEN $DATA_DIR $CONFIG_PATH
}
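# A usage sketch (an assumption about how these configs are consumed, not taken from the
# repository): source this file and call one of the functions to emit the space-separated
# hyper-parameters in the order printed by its echo line.
#   source scripts/params/cnn_dm_params.sh
#   dgxa100_8gpu_bf16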
|
PaddlePaddle/LanguageModeling/BERT/data | data | Downloader | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from WikiDownloader import WikiDownloader
from BooksDownloader import BooksDownloader
from SquadDownloader import SquadDownloader
class Downloader:
def __init__(self, dataset_name, save_path):
self.dataset_name = dataset_name
self.save_path = save_path
def download(self):
if self.dataset_name == 'bookscorpus':
self.download_bookscorpus()
elif self.dataset_name == 'wikicorpus_en':
self.download_wikicorpus('en')
elif self.dataset_name == 'wikicorpus_zh':
self.download_wikicorpus('zh')
elif self.dataset_name == 'squad':
self.download_squad()
elif self.dataset_name == 'all':
self.download_bookscorpus()
self.download_wikicorpus('en')
self.download_wikicorpus('zh')
self.download_squad()
else:
print(self.dataset_name)
assert False, 'Unknown dataset_name provided to downloader'
def download_bookscorpus(self):
downloader = BooksDownloader(self.save_path)
downloader.download()
def download_wikicorpus(self, language):
downloader = WikiDownloader(language, self.save_path)
downloader.download()
def download_squad(self):
downloader = SquadDownloader(self.save_path)
downloader.download()
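# A minimal usage sketch (assumes the *Downloader helper modules resolve their own
# download locations under save_path):
#   downloader = Downloader('squad', '/workspace/bert/data')
#   downloader.download()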
|
PyTorch/SpeechRecognition/wav2vec2/scripts | scripts | finetune_vox_960h | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
export OMP_NUM_THREADS=1
export CUDNN_V8_API_ENABLED=1 # For older containers (~22.01)
export TORCH_CUDNN_V8_API_ENABLED=1
# IO
: ${DATASET_DIR:="/datasets/LibriSpeech"}
: ${TRAIN_SUBSET:="train-full-960"}
: ${VALID_SUBSET:="dev-other"}
: ${OUTPUT_DIR:="results/finetune_large_960h"}
# Batching
# To best utilize the hardware, increase the batch size by increasing NUM_CONCAT_BATCHES and lowering UPDATE_FREQ.
# Keep NUM_NODES x NUM_GPUS x NUM_CONCAT_BATCHES x UPDATE_FREQ = 24.
# Note that this script does not control NUM_NODES.
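# Worked example (single node, the defaults below): 1 node x 8 GPUs x 3 concat batches x 1 update freq = 24.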
: ${NUM_GPUS:=8}
: ${MAX_TOKENS:=1280000}
: ${NUM_CONCAT_BATCHES:=3}
: ${UPDATE_FREQ:=1}
# Training
: ${MAX_UPDATE:=320000}
: ${WARMUP_UPDATES:=$(($MAX_UPDATE / 10 * 1))}
: ${HOLD_UPDATES:=$(($MAX_UPDATE / 10 * 4))}
: ${FREEZE_FINETUNE_UPDATES:=10000}
: ${BATCH_SIZE:=}
: ${LEARNING_RATE:=0.00003}
: ${FP16:=false}
: ${BF16:=false}
: ${EMA:=0.0} # XXX
: ${SEED:=1} # XXX
: ${CUDNN_BENCHMARK:=false}
# Model
: ${PRETRAINED_MODEL:=pretrained_models/libri960_big.pt}
: ${MASK_PROB:=0.5}
: ${MASK_CHANNEL_PROB:=0.25}
: ${ENCODER_LAYERDROP:=0.1}
# Misc
: ${NO_SAVE:=false}
: ${SAVE_FREQUENCY:=10}
: ${DISTRIBUTED="-m torch.distributed.launch --nproc_per_node=$NUM_GPUS"}
mkdir -p "$OUTPUT_DIR"
# ARGS+=" --no_epoch_checkpoints"
ARGS+=" --resume"
ARGS+=" --save_frequency $SAVE_FREQUENCY"
ARGS+=" --labels ltr"
ARGS+=" --w2v_path $PRETRAINED_MODEL"
ARGS+=" --data $DATASET_DIR"
ARGS+=" --train_subset $TRAIN_SUBSET"
ARGS+=" --valid_subset $VALID_SUBSET"
ARGS+=" --output_dir $OUTPUT_DIR"
ARGS+=" --ema $EMA"
ARGS+=" --adam_eps 1e-8"
ARGS+=" --lr $LEARNING_RATE"
ARGS+=" --lr_policy exp"
ARGS+=" --initial_lr_scale 0.01"
ARGS+=" --final_lr_scale 0.05"
ARGS+=" --warmup_updates $WARMUP_UPDATES"
ARGS+=" --hold_updates $HOLD_UPDATES"
ARGS+=" --max_update $MAX_UPDATE"
ARGS+=" --num_concat_batches $NUM_CONCAT_BATCHES"
ARGS+=" --update_freq $UPDATE_FREQ "
ARGS+=" --max_tokens $MAX_TOKENS"
ARGS+=" --max_tokens_valid $MAX_TOKENS"
ARGS+=" --freeze_finetune_updates $FREEZE_FINETUNE_UPDATES"
# Overrides
ARGS+=" --apply_mask"
ARGS+=" --mask_prob $MASK_PROB"
ARGS+=" --mask_channel_prob $MASK_CHANNEL_PROB"
ARGS+=" --mask_channel_length 64"
ARGS+=" --encoder_layerdrop $ENCODER_LAYERDROP" # NOTE This is called `layerdrop` in fairseq finetuning yamls
ARGS+=" --activation_dropout 0.1"
ARGS+=" --feature_grad_mult 0.0"
ARGS+=" --dropout_input 0.0"
ARGS+=" --dropout 0.0"
ARGS+=" --weight_decay 0.0"
ARGS+=" --mha pyt"
# float16
[ "$FP16" = true ] && ARGS+=" --fp16"
[ "$FP16" = true ] && ARGS+=" --fp32_cosine_sim"
[ "$FP16" = true ] && ARGS+=" --fp32_conv_norms"
[ "$FP16" = true ] && ARGS+=" --fp32_pos_conv"
# bfloat16
[ "$BF16" = true ] && ARGS+=" --bf16"
[ "$BF16" = true ] && ARGS+=" --fp32_pos_conv"
# Misc
[ -n "$SEED" ] && ARGS+=" --seed $SEED"
[ -n "$EPOCHS_THIS_JOB" ] && ARGS+=" --epochs_this_job $EPOCHS_THIS_JOB"
[ -n "$BATCH_SIZE" ] && ARGS+=" --batch_size $BATCH_SIZE"
[ "$CUDNN_BENCHMARK" = true ] && ARGS+=" --cudnn_benchmark"
[ "$FP32_TRANSFORMER_LAYERNORM" = true ] && ARGS+=" --fp32_transformer_layernorm"
[ "$FP32_MHA_SOFTMAX" = true ] && ARGS+=" --fp32_mha_softmax"
[ "$FP32_COSINE_SIM" = true ] && ARGS+=" --fp32_cosine_sim"
[ "$FP32_POS_CONV" = true ] && ARGS+=" --fp32_pos_conv"
[ "$FP32_CONV_NORMS" = true ] && ARGS+=" --fp32_conv_norms"
[ "$NO_SAVE" = true ] && ARGS+=" --no_save"
echo -e "\nFP16=$FP16, BP16=$BF16, ${NUM_GPUS}x(${MAX_TOKENS}x${NUM_CONCAT_BATCHES})x${UPDATE_FREQ}\n"
set -x
python3 $DISTRIBUTED train.py finetune $ARGS "$@"
|
Tools/PyTorch/TimeSeriesPredictionPlatform | TimeSeriesPredictionPlatform | .gitignore | .ipynb_checkpoints
__pycache__
/outputs/
*.zip
/datasets/*/
|
PyTorch/LanguageModeling/Transformer-XL/pytorch/inference | inference | proj_adaptive_softmax_jit | # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
class ProjectedAdaptiveLogSoftmax(nn.Module):
out_projs: List[Optional[torch.Tensor]]
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
dtype=None, tie_projs=None, out_layers_weights=None,
out_projs=None, keep_order=False):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
self.tie_projs = tie_projs
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(
torch.zeros(
self.n_clusters, self.d_embed,
dtype=dtype,
device=torch.device('cuda'),
)
)
self.cluster_bias = nn.Parameter(
torch.zeros(
self.n_clusters,
dtype=dtype,
device=torch.device('cuda'),
)
)
if not out_layers_weights:
self.out_layers_weights = []
else:
self.out_layers_weights = out_layers_weights
self.out_layers_biases = []
self.out_projs = []
if div_val == 1:
if d_proj != d_embed:
for i, tie_proj in enumerate(tie_projs):
if tie_proj:
self.out_projs.append(out_projs[0])
else:
self.out_projs.append(
torch.zeros(
d_proj, d_embed,
dtype=dtype,
device=torch.device('cuda'),
)
)
else:
for i, tie_proj in enumerate(tie_projs):
self.out_projs.append(None)
else:
for i, tie_proj in enumerate(tie_projs):
d_emb_i = d_embed // (div_val ** i)
if tie_proj:
self.out_projs.append(out_projs[i])
else:
self.out_projs.append(
torch.zeros(
d_proj, d_emb_i,
dtype=dtype,
device=torch.device('cuda'),
)
)
if div_val == 1:
self.out_layers_biases.append(
torch.zeros(
n_token,
dtype=dtype,
device=torch.device('cuda'),
)
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(
torch.zeros(
n_token, d_embed,
dtype=dtype,
device=torch.device('cuda'),
)
)
)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
self.out_layers_biases.append(
nn.Parameter(
torch.zeros(
r_idx - l_idx,
dtype=dtype,
device=torch.device('cuda'),
)
)
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(
torch.zeros(
r_idx - l_idx, d_emb_i,
dtype=dtype,
device=torch.device('cuda'),
)
)
)
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj: Optional[torch.Tensor]):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
logit = torch.einsum('bd,de,ev->bv', hidden, proj, weight.t())
if bias is not None:
logit = logit + bias
return logit
def forward(self, hidden, target, keep_order: bool = False):
'''
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
'''
if hidden.size(0) != target.size(0):
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.out_projs[0])
nll = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target,
layout=torch.strided,
dtype=hidden.dtype,
device=hidden.device,
)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob_i[:, -i] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if self.keep_order or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0)
return nll
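# A minimal usage sketch (hyper-parameters and shapes are illustrative only; the module
# allocates its parameters on CUDA):
#   crit = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=512, d_proj=512,
#                                      cutoffs=[1000, 5000], div_val=1,
#                                      tie_projs=[False, False, False])
#   hidden = torch.randn(32, 512, device='cuda')
#   target = torch.randint(0, 10000, (32,), device='cuda')
#   nll = crit(hidden, target)   # shape [32]: per-token negative log-likelihood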
|
PyTorch/SpeechSynthesis/HiFiGAN/common | common | gpu_affinity | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def set_socket_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity)
def set_single_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity[:1])
def set_single_unique_affinity(gpu_id, nproc_per_node):
devices = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
affinities = []
assigned = []
for socket_affinity in socket_affinities:
for core in socket_affinity:
if core not in assigned:
affinities.append([core])
assigned.append(core)
break
os.sched_setaffinity(0, affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, mode):
device_ids = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in device_ids]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
cores_per_device = len(socket_affinity) // devices_per_group
for group_id, device_id in enumerate(device_ids):
if device_id == gpu_id:
if mode == 'interleaved':
affinity = list(socket_affinity[group_id::devices_per_group])
elif mode == 'continuous':
affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device])
else:
raise RuntimeError('Unknown set_socket_unique_affinity mode')
# reintroduce siblings
affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
os.sched_setaffinity(0, affinity)
def get_thread_siblings_list():
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(map(int, res[0]))
thread_siblings_list.append(pair)
return thread_siblings_list
def set_affinity(gpu_id, nproc_per_node, mode='socket'):
if mode == 'socket':
set_socket_affinity(gpu_id)
elif mode == 'single':
set_single_affinity(gpu_id)
elif mode == 'single_unique':
set_single_unique_affinity(gpu_id, nproc_per_node)
elif mode == 'socket_unique_interleaved':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved')
elif mode == 'socket_unique_continuous':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous')
else:
raise RuntimeError('Unknown affinity mode')
affinity = os.sched_getaffinity(0)
return affinity
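# A minimal usage sketch (assumes one worker process per GPU and that the local rank is
# exposed via LOCAL_RANK, which is an assumption, not part of this module):
#   import os
#   local_rank = int(os.getenv('LOCAL_RANK', 0))
#   affinity = set_affinity(local_rank, nproc_per_node=8, mode='socket_unique_continuous')
#   print(f'GPU {local_rank} bound to {len(affinity)} CPU cores')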
|
PyTorch/Segmentation/MaskRCNN/pytorch/configs | configs | e2e_mask_rcnn_R_101_FPN_1x | MODEL:
META_ARCHITECTURE: "GeneralizedRCNN"
WEIGHT: "catalog://ImageNetPretrained/MSRA/R-101"
BACKBONE:
CONV_BODY: "R-101-FPN"
OUT_CHANNELS: 256
RPN:
USE_FPN: True
ANCHOR_STRIDE: (4, 8, 16, 32, 64)
PRE_NMS_TOP_N_TRAIN: 2000
PRE_NMS_TOP_N_TEST: 1000
POST_NMS_TOP_N_TEST: 1000
FPN_POST_NMS_TOP_N_TEST: 1000
ROI_HEADS:
USE_FPN: True
ROI_BOX_HEAD:
POOLER_RESOLUTION: 7
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
POOLER_SAMPLING_RATIO: 2
FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor"
PREDICTOR: "FPNPredictor"
ROI_MASK_HEAD:
POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125)
FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor"
PREDICTOR: "MaskRCNNC4Predictor"
POOLER_RESOLUTION: 14
POOLER_SAMPLING_RATIO: 2
RESOLUTION: 28
SHARE_BOX_FEATURE_EXTRACTOR: False
MASK_ON: True
DATASETS:
TRAIN: ("coco_2014_train", "coco_2014_valminusminival")
TEST: ("coco_2014_minival",)
DATALOADER:
SIZE_DIVISIBILITY: 32
SOLVER:
BASE_LR: 0.02
WEIGHT_DECAY: 0.0001
STEPS: (60000, 80000)
MAX_ITER: 90000
|
CUDA-Optimized/FastSpeech/fastspeech/utils | utils | pytorch | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
def to_device_async(tensor, device):
return tensor.to(device, non_blocking=True)
def to_gpu_async(cpu_tensor):
return cpu_tensor.to('cuda', non_blocking=True)
def to_cpu_numpy(gpu_tensor):
if not isinstance(gpu_tensor, torch.Tensor):
return gpu_tensor
return gpu_tensor.detach().cpu().numpy()
def remove_module_in_state_dict(state_dict):
"""
If the model was saved with DataParallel, the state dict keys start with 'module.';
strip that prefix and return a new state dict.
:param state_dict: state dict loaded from a checkpoint
:return: new state dict with the 'module.' prefix removed from its keys
"""
new_state_dict = {}
for key, val in state_dict.items():
new_key = key.replace('module.', '')
new_state_dict[new_key] = val
return new_state_dict
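# A minimal usage sketch (the 'state_dict' checkpoint key and the path are assumptions,
# shown only to illustrate the call):
#   ckpt = torch.load('checkpoint.pt', map_location='cpu')
#   model.load_state_dict(remove_module_in_state_dict(ckpt['state_dict']))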
|
Tools/PyTorch/TimeSeriesPredictionPlatform/inference | inference | deploy | #!/bin/bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# export TRITON_MODEL_OVERWRITE=True
NAV_DIR=$1
NV_VISIBLE_DEVICES=$2
echo "Start"
# Create common bridge for client and server
BRIDGE_NAME="bridge"
# docker network create ${BRIDGE_NAME}
# Clean up
# cleanup() {
# docker kill trt_server_cont
# docker network rm ${BRIDGE_NAME}
# }
# trap cleanup EXIT
# trap cleanup SIGTERM
# Start Server
echo Starting server...
SERVER_ID=$(bash inference/launch_triton_server.sh ${BRIDGE_NAME} ${NAV_DIR} $NV_VISIBLE_DEVICES )
echo $SERVER_ID
# SERVER_IP=$( docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${SERVER_ID} )
SERVER_URI="localhost"
echo "Waiting for TRITON Server to be ready at http://$SERVER_URI:8000..."
live_command="curl -i -m 1 -L -s -o /dev/null -w %{http_code} http://$SERVER_URI:8000/v2/health/live"
ready_command="curl -i -m 1 -L -s -o /dev/null -w %{http_code} http://$SERVER_URI:8000/v2/health/ready"
current_status=$($live_command)
echo $current_status
tempvar=0
# First check the current status. If that passes, check the json. If either fail, loop
while [[ ${current_status} != "200" ]] || [[ $($ready_command) != "200" ]]; do
printf "."
sleep 1
current_status=$($live_command)
if [[ $tempvar -ge 30 ]]; then
echo "Timeout waiting for triton server"
exit 1
fi
tempvar=$((tempvar+1))
done
echo "TRITON Server is ready!"
|
TensorFlow2/LanguageModeling/BERT | BERT | gpu_affinity | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
import os
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
return [i for i, e in enumerate(affinity_list) if e != 0]
def set_affinity(gpu_id=None):
if gpu_id is None:
gpu_id = int(os.getenv('LOCAL_RANK', 0))
dev = device(gpu_id)
os.sched_setaffinity(0, dev.getCpuAffinity())
# list of ints representing the logical cores this process is now affinitied with
return os.sched_getaffinity(0)
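# A minimal usage sketch: with the default argument the GPU index is taken from LOCAL_RANK,
# and the returned set lists the logical cores this process is now pinned to.
#   cores = set_affinity()
#   print(f'Pinned to {len(cores)} cores')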
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | box_list_ops | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List operations.
Example box operations that are supported:
* areas: compute bounding box areas
* iou: pairwise intersection-over-union scores
* sq_dist: pairwise distances between bounding boxes
Whenever box_list_ops functions output a BoxList, the fields of the incoming
BoxList are retained unless documented otherwise.
"""
import tensorflow as tf
from object_detection.core import box_list
from object_detection.utils import ops
from object_detection.utils import shape_utils
class SortOrder(object):
"""Enum class for sort order.
Attributes:
ascend: ascend order.
descend: descend order.
"""
ascend = 1
descend = 2
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, 'Area'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def height_width(boxlist, scope=None):
"""Computes height and width of boxes in boxlist.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
Height: A tensor with shape [N] representing box heights.
Width: A tensor with shape [N] representing box widths.
"""
with tf.name_scope(scope, 'HeightWidth'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze(y_max - y_min, [1]), tf.squeeze(x_max - x_min, [1])
def scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def clip_to_window(boxlist, window, filter_nonoverlapping=True, scope=None):
"""Clip bounding boxes to a window.
This op clips any input bounding boxes (represented by bounding box
corners) to a window, optionally filtering out boxes that do not
overlap at all with the window.
Args:
boxlist: BoxList holding M_in boxes
window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
window to which the op should clip boxes.
filter_nonoverlapping: whether to filter out boxes that do not overlap at
all with the window.
scope: name scope.
Returns:
a BoxList holding M_out boxes where M_out <= M_in
"""
with tf.name_scope(scope, 'ClipToWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
y_min_clipped = tf.maximum(tf.minimum(y_min, win_y_max), win_y_min)
y_max_clipped = tf.maximum(tf.minimum(y_max, win_y_max), win_y_min)
x_min_clipped = tf.maximum(tf.minimum(x_min, win_x_max), win_x_min)
x_max_clipped = tf.maximum(tf.minimum(x_max, win_x_max), win_x_min)
clipped = box_list.BoxList(
tf.concat([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped],
1))
clipped = _copy_extra_fields(clipped, boxlist)
if filter_nonoverlapping:
areas = area(clipped)
nonzero_area_indices = tf.cast(
tf.reshape(tf.where(tf.greater(areas, 0.0)), [-1]), tf.int32)
clipped = gather(clipped, nonzero_area_indices)
return clipped
def prune_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall outside a given window.
This function prunes bounding boxes that even partially fall outside the given
window. See also clip_to_window which only prunes bounding boxes that fall
completely outside the window, and clips any bounding boxes that partially
overflow.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
of the window
scope: name scope.
Returns:
pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
with tf.name_scope(scope, 'PruneOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
tf.less(y_min, win_y_min), tf.less(x_min, win_x_min),
tf.greater(y_max, win_y_max), tf.greater(x_max, win_x_max)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
def prune_completely_outside_window(boxlist, window, scope=None):
"""Prunes bounding boxes that fall completely outside of the given window.
The function clip_to_window prunes bounding boxes that fall
completely outside the window, but also clips any bounding boxes that
partially overflow. This function does not clip partially overflowing boxes.
Args:
boxlist: a BoxList holding M_in boxes.
window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
of the window
scope: name scope.
Returns:
pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
the window.
valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
in the input tensor.
"""
with tf.name_scope(scope, 'PruneCompletelyOutsideWindow'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
], 1)
valid_indices = tf.reshape(
tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
return gather(boxlist, valid_indices), valid_indices
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, 'Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def matched_intersection(boxlist1, boxlist2, scope=None):
"""Compute intersection areas between corresponding boxes in two boxlists.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing pairwise intersections
"""
with tf.name_scope(scope, 'MatchedIntersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
min_ymax = tf.minimum(y_max1, y_max2)
max_ymin = tf.maximum(y_min1, y_min2)
intersect_heights = tf.maximum(0.0, min_ymax - max_ymin)
min_xmax = tf.minimum(x_max1, x_max2)
max_xmin = tf.maximum(x_min1, x_min2)
intersect_widths = tf.maximum(0.0, min_xmax - max_xmin)
return tf.reshape(intersect_heights * intersect_widths, [-1])
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'IOU'):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
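# A minimal usage sketch (TF1 graph-mode code, matching this module; coordinates are
# illustrative [y_min, x_min, y_max, x_max] boxes):
#   boxes1 = box_list.BoxList(tf.constant([[0.0, 0.0, 1.0, 1.0]]))
#   boxes2 = box_list.BoxList(tf.constant([[0.0, 0.0, 0.5, 1.0]]))
#   iou_matrix = iou(boxes1, boxes2)   # shape [1, 1]; evaluates to 0.5 for these boxes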
def matched_iou(boxlist1, boxlist2, scope=None):
"""Compute intersection-over-union between corresponding boxes in boxlists.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'MatchedIOU'):
intersections = matched_intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = areas1 + areas2 - intersections
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
def ioa(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-area between box collections.
intersection-over-area (IOA) between two boxes box1 and box2 is defined as
their intersection area over box2's area. Note that ioa is not symmetric,
that is, ioa(box1, box2) != ioa(box2, box1).
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise ioa scores.
"""
with tf.name_scope(scope, 'IOA'):
intersections = intersection(boxlist1, boxlist2)
areas = tf.expand_dims(area(boxlist2), 0)
return tf.truediv(intersections, areas)
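# Worked example (illustrative only): with box1 = [0., 0., 1., 1.] and
# box2 = [0., 0., 0.5, 0.5], the intersection area is 0.25, so
# ioa(box1, box2) = 0.25 / area(box2) = 0.25 / 0.25 = 1.0, while
# ioa(box2, box1) = 0.25 / area(box1) = 0.25 / 1.0 = 0.25, showing the
# asymmetry noted in the docstring above.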
def prune_non_overlapping_boxes(
boxlist1, boxlist2, min_overlap=0.0, scope=None):
"""Prunes the boxes in boxlist1 that overlap less than thresh with boxlist2.
For each box in boxlist1, we want its IOA to be more than minoverlap with
at least one of the boxes in boxlist2. If it does not, we remove it.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
min_overlap: Minimum required overlap between boxes, to count them as
overlapping.
scope: name scope.
Returns:
new_boxlist1: A pruned boxlist with size [N', 4].
keep_inds: A tensor with shape [N'] indexing kept bounding boxes in the
first input BoxList `boxlist1`.
"""
with tf.name_scope(scope, 'PruneNonOverlappingBoxes'):
ioa_ = ioa(boxlist2, boxlist1) # [M, N] tensor
ioa_ = tf.reduce_max(ioa_, reduction_indices=[0]) # [N] tensor
keep_bool = tf.greater_equal(ioa_, tf.constant(min_overlap))
keep_inds = tf.squeeze(tf.where(keep_bool), squeeze_dims=[1])
new_boxlist1 = gather(boxlist1, keep_inds)
return new_boxlist1, keep_inds
def prune_small_boxes(boxlist, min_side, scope=None):
"""Prunes small boxes in the boxlist which have a side smaller than min_side.
Args:
boxlist: BoxList holding N boxes.
min_side: Minimum width AND height of box to survive pruning.
scope: name scope.
Returns:
A pruned boxlist.
"""
with tf.name_scope(scope, 'PruneSmallBoxes'):
height, width = height_width(boxlist)
is_valid = tf.logical_and(tf.greater_equal(width, min_side),
tf.greater_equal(height, min_side))
return gather(boxlist, tf.reshape(tf.where(is_valid), [-1]))
def change_coordinate_frame(boxlist, window, scope=None):
"""Change coordinate frame of the boxlist to be relative to window's frame.
Given a window of the form [ymin, xmin, ymax, xmax],
changes bounding box coordinates from boxlist to be relative to this window
(e.g., the min corner maps to (0,0) and the max corner maps to (1,1)).
An example use case is data augmentation: where we are given groundtruth
boxes (boxlist) and would like to randomly crop the image to some
window (window). In this case we need to change the coordinate frame of
each groundtruth box to be relative to this new window.
Args:
boxlist: A BoxList object holding N boxes.
window: A rank 1 tensor [4].
scope: name scope.
Returns:
Returns a BoxList object with N boxes.
"""
with tf.name_scope(scope, 'ChangeCoordinateFrame'):
win_height = window[2] - window[0]
win_width = window[3] - window[1]
boxlist_new = scale(box_list.BoxList(
boxlist.get() - [window[0], window[1], window[0], window[1]]),
1.0 / win_height, 1.0 / win_width)
boxlist_new = _copy_extra_fields(boxlist_new, boxlist)
return boxlist_new
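# Worked example (illustrative only): with window = [0.25, 0.25, 0.75, 0.75]
# (height = width = 0.5), a box [0.25, 0.25, 0.5, 0.5] is first shifted to
# [0.0, 0.0, 0.25, 0.25] and then scaled by 1 / 0.5 = 2, giving
# [0.0, 0.0, 0.5, 0.5] in the window's coordinate frame.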
def sq_dist(boxlist1, boxlist2, scope=None):
"""Computes the pairwise squared distances between box corners.
This op treats each box as if it were a point in a 4d Euclidean space and
computes pairwise squared distances.
Mathematically, we are given two matrices of box coordinates X and Y,
where X(i,:) is the i'th row of X, containing the 4 numbers defining the
corners of the i'th box in boxlist1. Similarly Y(j,:) corresponds to
boxlist2. We compute
Z(i,j) = ||X(i,:) - Y(j,:)||^2
= ||X(i,:)||^2 + ||Y(j,:)||^2 - 2 X(i,:)' * Y(j,:),
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise distances
"""
with tf.name_scope(scope, 'SqDist'):
sqnorm1 = tf.reduce_sum(tf.square(boxlist1.get()), 1, keep_dims=True)
sqnorm2 = tf.reduce_sum(tf.square(boxlist2.get()), 1, keep_dims=True)
innerprod = tf.matmul(boxlist1.get(), boxlist2.get(),
transpose_a=False, transpose_b=True)
return sqnorm1 + tf.transpose(sqnorm2) - 2.0 * innerprod
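# Worked example (illustrative only): for X(0, :) = [0, 0, 1, 1] and
# Y(0, :) = [0.5, 0.5, 1, 1], the difference is [-0.5, -0.5, 0, 0], so
# Z(0, 0) = 0.25 + 0.25 = 0.5. Equivalently, using the expansion above:
# ||X||^2 + ||Y||^2 - 2 X'Y = 2.0 + 2.5 - 2 * 2.0 = 0.5.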
def boolean_mask(boxlist, indicator, fields=None, scope=None,
use_static_shapes=False, indicator_sum=None):
"""Select boxes from BoxList according to indicator and return new BoxList.
`boolean_mask` returns the subset of boxes that are marked as "True" by the
indicator tensor. By default, `boolean_mask` returns boxes corresponding to
the input index list, as well as all additional fields stored in the boxlist
(indexing into the first dimension). However one can optionally only draw
from a subset of fields.
Args:
boxlist: BoxList holding N boxes
indicator: a rank-1 boolean tensor
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
scope: name scope.
use_static_shapes: Whether to use an implementation with static shape
      guarantees.
indicator_sum: An integer containing the sum of `indicator` vector. Only
required if `use_static_shape` is True.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indicator
Raises:
ValueError: if `indicator` is not a rank-1 boolean tensor.
"""
with tf.name_scope(scope, 'BooleanMask'):
if indicator.shape.ndims != 1:
raise ValueError('indicator should have rank 1')
if indicator.dtype != tf.bool:
raise ValueError('indicator should be a boolean tensor')
if use_static_shapes:
if not (indicator_sum and isinstance(indicator_sum, int)):
        raise ValueError('`indicator_sum` must be of type int')
selected_positions = tf.to_float(indicator)
indexed_positions = tf.cast(
tf.multiply(
tf.cumsum(selected_positions), selected_positions),
dtype=tf.int32)
one_hot_selector = tf.one_hot(
indexed_positions - 1, indicator_sum, dtype=tf.float32)
sampled_indices = tf.cast(
tf.tensordot(
tf.to_float(tf.range(tf.shape(indicator)[0])),
one_hot_selector,
axes=[0, 0]),
dtype=tf.int32)
return gather(boxlist, sampled_indices, use_static_shapes=True)
else:
subboxlist = box_list.BoxList(tf.boolean_mask(boxlist.get(), indicator))
if fields is None:
fields = boxlist.get_extra_fields()
for field in fields:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all specified fields')
subfieldlist = tf.boolean_mask(boxlist.get_field(field), indicator)
subboxlist.add_field(field, subfieldlist)
return subboxlist
def gather(boxlist, indices, fields=None, scope=None, use_static_shapes=False):
"""Gather boxes from BoxList according to indices and return new BoxList.
By default, `gather` returns boxes corresponding to the input index list, as
well as all additional fields stored in the boxlist (indexing into the
first dimension). However one can optionally only gather from a
subset of fields.
Args:
boxlist: BoxList holding N boxes
indices: a rank-1 tensor of type int32 / int64
fields: (optional) list of fields to also gather from. If None (default),
all fields are gathered from. Pass an empty fields list to only gather
the box coordinates.
scope: name scope.
use_static_shapes: Whether to use an implementation with static shape
      guarantees.
Returns:
subboxlist: a BoxList corresponding to the subset of the input BoxList
specified by indices
Raises:
ValueError: if specified field is not contained in boxlist or if the
indices are not of type int32
"""
with tf.name_scope(scope, 'Gather'):
if len(indices.shape.as_list()) != 1:
raise ValueError('indices should have rank 1')
if indices.dtype != tf.int32 and indices.dtype != tf.int64:
raise ValueError('indices should be an int32 / int64 tensor')
gather_op = tf.gather
if use_static_shapes:
gather_op = ops.matmul_gather_on_zeroth_axis
subboxlist = box_list.BoxList(gather_op(boxlist.get(), indices))
if fields is None:
fields = boxlist.get_extra_fields()
fields += ['boxes']
for field in fields:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all specified fields')
subfieldlist = gather_op(boxlist.get_field(field), indices)
subboxlist.add_field(field, subfieldlist)
return subboxlist
def concatenate(boxlists, fields=None, scope=None):
"""Concatenate list of BoxLists.
This op concatenates a list of input BoxLists into a larger BoxList. It also
handles concatenation of BoxList fields as long as the field tensor shapes
are equal except for the first dimension.
Args:
boxlists: list of BoxList objects
fields: optional list of fields to also concatenate. By default, all
fields from the first BoxList in the list are included in the
concatenation.
scope: name scope.
Returns:
a BoxList with number of boxes equal to
sum([boxlist.num_boxes() for boxlist in BoxList])
Raises:
ValueError: if boxlists is invalid (i.e., is not a list, is empty, or
contains non BoxList objects), or if requested fields are not contained in
all boxlists
"""
with tf.name_scope(scope, 'Concatenate'):
if not isinstance(boxlists, list):
raise ValueError('boxlists should be a list')
if not boxlists:
raise ValueError('boxlists should have nonzero length')
for boxlist in boxlists:
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('all elements of boxlists should be BoxList objects')
concatenated = box_list.BoxList(
tf.concat([boxlist.get() for boxlist in boxlists], 0))
if fields is None:
fields = boxlists[0].get_extra_fields()
for field in fields:
first_field_shape = boxlists[0].get_field(field).get_shape().as_list()
first_field_shape[0] = -1
if None in first_field_shape:
raise ValueError('field %s must have fully defined shape except for the'
' 0th dimension.' % field)
for boxlist in boxlists:
if not boxlist.has_field(field):
raise ValueError('boxlist must contain all requested fields')
field_shape = boxlist.get_field(field).get_shape().as_list()
field_shape[0] = -1
if field_shape != first_field_shape:
raise ValueError('field %s must have same shape for all boxlists '
'except for the 0th dimension.' % field)
concatenated_field = tf.concat(
[boxlist.get_field(field) for boxlist in boxlists], 0)
concatenated.add_field(field, concatenated_field)
return concatenated
def sort_by_field(boxlist, field, order=SortOrder.descend, scope=None):
"""Sort boxes and associated fields according to a scalar field.
A common use case is reordering the boxes according to descending scores.
Args:
boxlist: BoxList holding N boxes.
field: A BoxList field for sorting and reordering the BoxList.
order: (Optional) descend or ascend. Default is descend.
scope: name scope.
Returns:
sorted_boxlist: A sorted BoxList with the field in the specified order.
Raises:
ValueError: if specified field does not exist
ValueError: if the order is not either descend or ascend
"""
with tf.name_scope(scope, 'SortByField'):
if order != SortOrder.descend and order != SortOrder.ascend:
raise ValueError('Invalid sort order')
field_to_sort = boxlist.get_field(field)
if len(field_to_sort.shape.as_list()) != 1:
raise ValueError('Field should have rank 1')
num_boxes = boxlist.num_boxes()
num_entries = tf.size(field_to_sort)
length_assert = tf.Assert(
tf.equal(num_boxes, num_entries),
['Incorrect field size: actual vs expected.', num_entries, num_boxes])
with tf.control_dependencies([length_assert]):
_, sorted_indices = tf.nn.top_k(field_to_sort, num_boxes, sorted=True)
if order == SortOrder.ascend:
sorted_indices = tf.reverse_v2(sorted_indices, [0])
return gather(boxlist, sorted_indices)
def visualize_boxes_in_image(image, boxlist, normalized=False, scope=None):
"""Overlay bounding box list on image.
Currently this visualization plots a 1 pixel thick red bounding box on top
of the image. Note that tf.image.draw_bounding_boxes essentially is
1 indexed.
Args:
image: an image tensor with shape [height, width, 3]
boxlist: a BoxList
normalized: (boolean) specify whether corners are to be interpreted
as absolute coordinates in image space or normalized with respect to the
image size.
scope: name scope.
Returns:
image_and_boxes: an image tensor with shape [height, width, 3]
"""
with tf.name_scope(scope, 'VisualizeBoxesInImage'):
if not normalized:
height, width, _ = tf.unstack(tf.shape(image))
boxlist = scale(boxlist,
1.0 / tf.cast(height, tf.float32),
1.0 / tf.cast(width, tf.float32))
corners = tf.expand_dims(boxlist.get(), 0)
image = tf.expand_dims(image, 0)
return tf.squeeze(tf.image.draw_bounding_boxes(image, corners), [0])
def filter_field_value_equals(boxlist, field, value, scope=None):
"""Filter to keep only boxes with field entries equal to the given value.
Args:
boxlist: BoxList holding N boxes.
field: field name for filtering.
value: scalar value.
scope: name scope.
Returns:
a BoxList holding M boxes where M <= N
Raises:
ValueError: if boxlist not a BoxList object or if it does not have
the specified field.
"""
with tf.name_scope(scope, 'FilterFieldValueEquals'):
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field(field):
raise ValueError('boxlist must contain the specified field')
filter_field = boxlist.get_field(field)
gather_index = tf.reshape(tf.where(tf.equal(filter_field, value)), [-1])
return gather(boxlist, gather_index)
def filter_greater_than(boxlist, thresh, scope=None):
"""Filter to keep only boxes with score exceeding a given threshold.
This op keeps the collection of boxes whose corresponding scores are
greater than the input threshold.
TODO(jonathanhuang): Change function name to filter_scores_greater_than
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
scope: name scope.
Returns:
a BoxList holding M boxes where M <= N
Raises:
ValueError: if boxlist not a BoxList object or if it does not
have a scores field
"""
with tf.name_scope(scope, 'FilterGreaterThan'):
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
scores = boxlist.get_field('scores')
if len(scores.shape.as_list()) > 2:
raise ValueError('Scores should have rank 1 or 2')
if len(scores.shape.as_list()) == 2 and scores.shape.as_list()[1] != 1:
raise ValueError('Scores should have rank 1 or have shape '
'consistent with [None, 1]')
high_score_indices = tf.cast(tf.reshape(
tf.where(tf.greater(scores, thresh)),
[-1]), tf.int32)
return gather(boxlist, high_score_indices)
def non_max_suppression(boxlist, thresh, max_output_size, scope=None):
"""Non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. Note that this only works for a single class ---
to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
max_output_size: maximum number of retained boxes
scope: name scope.
Returns:
a BoxList holding M boxes where M <= max_output_size
Raises:
ValueError: if thresh is not in [0, 1]
"""
with tf.name_scope(scope, 'NonMaxSuppression'):
if not 0 <= thresh <= 1.0:
raise ValueError('thresh must be between 0 and 1')
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
with tf.device('/CPU:0'):
selected_indices = tf.image.non_max_suppression(
boxlist.get(), boxlist.get_field('scores'),
max_output_size, iou_threshold=thresh)
return gather(boxlist, selected_indices)
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def to_normalized_coordinates(boxlist, height, width,
check_range=True, scope=None):
"""Converts absolute box coordinates to normalized coordinates in [0, 1].
Usually one uses the dynamic shape of the image or conv-layer tensor:
boxlist = box_list_ops.to_normalized_coordinates(boxlist,
tf.shape(images)[1],
tf.shape(images)[2]),
This function raises an assertion failed error at graph execution time when
the maximum coordinate is smaller than 1.01 (which means that coordinates are
already normalized). The value 1.01 is to deal with small rounding errors.
Args:
boxlist: BoxList with coordinates in terms of pixel-locations.
height: Maximum value for height of absolute box coordinates.
width: Maximum value for width of absolute box coordinates.
check_range: If True, checks if the coordinates are normalized or not.
scope: name scope.
Returns:
boxlist with normalized coordinates in [0, 1].
"""
with tf.name_scope(scope, 'ToNormalizedCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
if check_range:
max_val = tf.reduce_max(boxlist.get())
max_assert = tf.Assert(tf.greater(max_val, 1.01),
['max value is lower than 1.01: ', max_val])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(boxlist, 1 / height, 1 / width)
def to_absolute_coordinates(boxlist,
height,
width,
check_range=True,
maximum_normalized_coordinate=1.1,
scope=None):
"""Converts normalized box coordinates to absolute pixel coordinates.
This function raises an assertion failed error when the maximum box coordinate
value is larger than maximum_normalized_coordinate (in which case coordinates
are already absolute).
Args:
boxlist: BoxList with coordinates in range [0, 1].
height: Maximum value for height of absolute box coordinates.
width: Maximum value for width of absolute box coordinates.
check_range: If True, checks if the coordinates are normalized or not.
maximum_normalized_coordinate: Maximum coordinate value to be considered
as normalized, default to 1.1.
scope: name scope.
Returns:
boxlist with absolute coordinates in terms of the image size.
"""
with tf.name_scope(scope, 'ToAbsoluteCoordinates'):
height = tf.cast(height, tf.float32)
width = tf.cast(width, tf.float32)
# Ensure range of input boxes is correct.
if check_range:
box_maximum = tf.reduce_max(boxlist.get())
max_assert = tf.Assert(
tf.greater_equal(maximum_normalized_coordinate, box_maximum),
['maximum box coordinate value is larger '
'than %f: ' % maximum_normalized_coordinate, box_maximum])
with tf.control_dependencies([max_assert]):
width = tf.identity(width)
return scale(boxlist, height, width)
def refine_boxes_multi_class(pool_boxes,
num_classes,
nms_iou_thresh,
nms_max_detections,
voting_iou_thresh=0.5):
"""Refines a pool of boxes using non max suppression and box voting.
Box refinement is done independently for each class.
Args:
pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must
have a rank 1 'scores' field and a rank 1 'classes' field.
num_classes: (int scalar) Number of classes.
nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).
nms_max_detections: (int scalar) maximum output size for NMS.
voting_iou_thresh: (float scalar) iou threshold for box voting.
Returns:
BoxList of refined boxes.
Raises:
ValueError: if
a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].
b) pool_boxes is not a BoxList.
c) pool_boxes does not have a scores and classes field.
"""
if not 0.0 <= nms_iou_thresh <= 1.0:
raise ValueError('nms_iou_thresh must be between 0 and 1')
if not 0.0 <= voting_iou_thresh <= 1.0:
raise ValueError('voting_iou_thresh must be between 0 and 1')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
if not pool_boxes.has_field('classes'):
raise ValueError('pool_boxes must have a \'classes\' field')
refined_boxes = []
for i in range(num_classes):
boxes_class = filter_field_value_equals(pool_boxes, 'classes', i)
refined_boxes_class = refine_boxes(boxes_class, nms_iou_thresh,
nms_max_detections, voting_iou_thresh)
refined_boxes.append(refined_boxes_class)
return sort_by_field(concatenate(refined_boxes), 'scores')
def refine_boxes(pool_boxes,
nms_iou_thresh,
nms_max_detections,
voting_iou_thresh=0.5):
"""Refines a pool of boxes using non max suppression and box voting.
Args:
pool_boxes: (BoxList) A collection of boxes to be refined. pool_boxes must
have a rank 1 'scores' field.
nms_iou_thresh: (float scalar) iou threshold for non max suppression (NMS).
nms_max_detections: (int scalar) maximum output size for NMS.
voting_iou_thresh: (float scalar) iou threshold for box voting.
Returns:
BoxList of refined boxes.
Raises:
ValueError: if
a) nms_iou_thresh or voting_iou_thresh is not in [0, 1].
b) pool_boxes is not a BoxList.
c) pool_boxes does not have a scores field.
"""
if not 0.0 <= nms_iou_thresh <= 1.0:
raise ValueError('nms_iou_thresh must be between 0 and 1')
if not 0.0 <= voting_iou_thresh <= 1.0:
raise ValueError('voting_iou_thresh must be between 0 and 1')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
nms_boxes = non_max_suppression(
pool_boxes, nms_iou_thresh, nms_max_detections)
return box_voting(nms_boxes, pool_boxes, voting_iou_thresh)
def box_voting(selected_boxes, pool_boxes, iou_thresh=0.5):
"""Performs box voting as described in S. Gidaris and N. Komodakis, ICCV 2015.
Performs box voting as described in 'Object detection via a multi-region &
semantic segmentation-aware CNN model', Gidaris and Komodakis, ICCV 2015. For
each box 'B' in selected_boxes, we find the set 'S' of boxes in pool_boxes
with iou overlap >= iou_thresh. The location of B is set to the weighted
average location of boxes in S (scores are used for weighting). And the score
of B is set to the average score of boxes in S.
Args:
selected_boxes: BoxList containing a subset of boxes in pool_boxes. These
boxes are usually selected from pool_boxes using non max suppression.
pool_boxes: BoxList containing a set of (possibly redundant) boxes.
iou_thresh: (float scalar) iou threshold for matching boxes in
selected_boxes and pool_boxes.
Returns:
BoxList containing averaged locations and scores for each box in
selected_boxes.
Raises:
ValueError: if
a) selected_boxes or pool_boxes is not a BoxList.
b) if iou_thresh is not in [0, 1].
c) pool_boxes does not have a scores field.
"""
if not 0.0 <= iou_thresh <= 1.0:
raise ValueError('iou_thresh must be between 0 and 1')
if not isinstance(selected_boxes, box_list.BoxList):
raise ValueError('selected_boxes must be a BoxList')
if not isinstance(pool_boxes, box_list.BoxList):
raise ValueError('pool_boxes must be a BoxList')
if not pool_boxes.has_field('scores'):
raise ValueError('pool_boxes must have a \'scores\' field')
iou_ = iou(selected_boxes, pool_boxes)
match_indicator = tf.to_float(tf.greater(iou_, iou_thresh))
num_matches = tf.reduce_sum(match_indicator, 1)
# TODO(kbanoop): Handle the case where some boxes in selected_boxes do not
# match to any boxes in pool_boxes. For such boxes without any matches, we
# should return the original boxes without voting.
match_assert = tf.Assert(
tf.reduce_all(tf.greater(num_matches, 0)),
['Each box in selected_boxes must match with at least one box '
'in pool_boxes.'])
scores = tf.expand_dims(pool_boxes.get_field('scores'), 1)
scores_assert = tf.Assert(
tf.reduce_all(tf.greater_equal(scores, 0)),
['Scores must be non negative.'])
with tf.control_dependencies([scores_assert, match_assert]):
sum_scores = tf.matmul(match_indicator, scores)
averaged_scores = tf.reshape(sum_scores, [-1]) / num_matches
box_locations = tf.matmul(match_indicator,
pool_boxes.get() * scores) / sum_scores
averaged_boxes = box_list.BoxList(box_locations)
_copy_extra_fields(averaged_boxes, selected_boxes)
averaged_boxes.add_field('scores', averaged_scores)
return averaged_boxes
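# Worked example (illustrative only): suppose pool_boxes holds
# B1 = [0, 0, 1, 1] with score 0.8 and B2 = [0, 0, 0.8, 0.8] with score 0.2,
# and selected_boxes = {B1}. Their IOU is 0.64, so with iou_thresh = 0.5 both
# pool boxes match B1. The voted location is the score-weighted average
# (0.8 * B1 + 0.2 * B2) / 1.0 = [0, 0, 0.96, 0.96], and the voted score is the
# mean of the matched scores, (0.8 + 0.2) / 2 = 0.5.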
def pad_or_clip_box_list(boxlist, num_boxes, scope=None):
"""Pads or clips all fields of a BoxList.
Args:
    boxlist: A BoxList with an arbitrary number of boxes.
num_boxes: First num_boxes in boxlist are kept.
The fields are zero-padded if num_boxes is bigger than the
actual number of boxes.
scope: name scope.
Returns:
BoxList with all fields padded or clipped.
"""
with tf.name_scope(scope, 'PadOrClipBoxList'):
subboxlist = box_list.BoxList(shape_utils.pad_or_clip_tensor(
boxlist.get(), num_boxes))
for field in boxlist.get_extra_fields():
subfield = shape_utils.pad_or_clip_tensor(
boxlist.get_field(field), num_boxes)
subboxlist.add_field(field, subfield)
return subboxlist
def select_random_box(boxlist,
default_box=None,
seed=None,
scope=None):
"""Selects a random bounding box from a `BoxList`.
Args:
boxlist: A BoxList.
default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
this default box will be returned. If None, will use a default box of
[[-1., -1., -1., -1.]].
seed: Random seed.
scope: Name scope.
Returns:
bbox: A [1, 4] tensor with a random bounding box.
valid: A bool tensor indicating whether a valid bounding box is returned
(True) or whether the default box is returned (False).
"""
with tf.name_scope(scope, 'SelectRandomBox'):
bboxes = boxlist.get()
combined_shape = shape_utils.combined_static_and_dynamic_shape(bboxes)
number_of_boxes = combined_shape[0]
default_box = default_box or tf.constant([[-1., -1., -1., -1.]])
def select_box():
random_index = tf.random_uniform([],
maxval=number_of_boxes,
dtype=tf.int32,
seed=seed)
return tf.expand_dims(bboxes[random_index], axis=0), tf.constant(True)
return tf.cond(
tf.greater_equal(number_of_boxes, 1),
true_fn=select_box,
false_fn=lambda: (default_box, tf.constant(False)))
def get_minimal_coverage_box(boxlist,
default_box=None,
scope=None):
"""Creates a single bounding box which covers all boxes in the boxlist.
Args:
boxlist: A Boxlist.
default_box: A [1, 4] float32 tensor. If no boxes are present in `boxlist`,
this default box will be returned. If None, will use a default box of
[[0., 0., 1., 1.]].
scope: Name scope.
Returns:
A [1, 4] float32 tensor with a bounding box that tightly covers all the
boxes in the box list. If the boxlist does not contain any boxes, the
default box is returned.
"""
with tf.name_scope(scope, 'CreateCoverageBox'):
num_boxes = boxlist.num_boxes()
def coverage_box(bboxes):
y_min, x_min, y_max, x_max = tf.split(
value=bboxes, num_or_size_splits=4, axis=1)
y_min_coverage = tf.reduce_min(y_min, axis=0)
x_min_coverage = tf.reduce_min(x_min, axis=0)
y_max_coverage = tf.reduce_max(y_max, axis=0)
x_max_coverage = tf.reduce_max(x_max, axis=0)
return tf.stack(
[y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
axis=1)
default_box = default_box or tf.constant([[0., 0., 1., 1.]])
return tf.cond(
tf.greater_equal(num_boxes, 1),
true_fn=lambda: coverage_box(boxlist.get()),
false_fn=lambda: default_box)
def sample_boxes_by_jittering(boxlist,
num_boxes_to_sample,
stddev=0.1,
scope=None):
"""Samples num_boxes_to_sample boxes by jittering around boxlist boxes.
  It is possible that this function might generate boxes with size 0. The larger
  the stddev, the more probable this is. For a small stddev of 0.1 this
  probability is very small.
Args:
boxlist: A boxlist containing N boxes in normalized coordinates.
num_boxes_to_sample: A positive integer containing the number of boxes to
sample.
stddev: Standard deviation. This is used to draw random offsets for the
box corners from a normal distribution. The offset is multiplied by the
box size so will be larger in terms of pixels for larger boxes.
scope: Name scope.
Returns:
sampled_boxlist: A boxlist containing num_boxes_to_sample boxes in
normalized coordinates.
"""
with tf.name_scope(scope, 'SampleBoxesByJittering'):
num_boxes = boxlist.num_boxes()
box_indices = tf.random_uniform(
[num_boxes_to_sample],
minval=0,
maxval=num_boxes,
dtype=tf.int32)
sampled_boxes = tf.gather(boxlist.get(), box_indices)
sampled_boxes_height = sampled_boxes[:, 2] - sampled_boxes[:, 0]
sampled_boxes_width = sampled_boxes[:, 3] - sampled_boxes[:, 1]
rand_miny_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
rand_minx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
rand_maxy_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
rand_maxx_gaussian = tf.random_normal([num_boxes_to_sample], stddev=stddev)
miny = rand_miny_gaussian * sampled_boxes_height + sampled_boxes[:, 0]
minx = rand_minx_gaussian * sampled_boxes_width + sampled_boxes[:, 1]
maxy = rand_maxy_gaussian * sampled_boxes_height + sampled_boxes[:, 2]
maxx = rand_maxx_gaussian * sampled_boxes_width + sampled_boxes[:, 3]
maxy = tf.maximum(miny, maxy)
maxx = tf.maximum(minx, maxx)
sampled_boxes = tf.stack([miny, minx, maxy, maxx], axis=1)
sampled_boxes = tf.maximum(tf.minimum(sampled_boxes, 1.0), 0.0)
return box_list.BoxList(sampled_boxes)
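# Worked example (illustrative only): for a sampled box [0.2, 0.2, 0.6, 0.6]
# (height = width = 0.4) and a drawn ymin offset of +0.1, the jittered ymin is
# 0.1 * 0.4 + 0.2 = 0.24, so offsets scale with box size; the final
# tf.maximum and clipping steps keep the corners ordered and inside [0, 1].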
|
TensorFlow/Segmentation/UNet_Industrial/model/blocks | blocks | unet_downsample | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import tensorflow as tf
from model import layers
from model import blocks
__all__ = ["downsample_unet_block"]
def downsample_unet_block(
inputs, filters, data_format='NCHW', is_training=True, conv2d_hparams=None, block_name='downsample_block'
):
if not isinstance(conv2d_hparams, tf.contrib.training.HParams):
raise ValueError("The paramater `conv2d_hparams` is not of type `HParams`")
if data_format not in ['NHWC', 'NCHW']:
raise ValueError("Unknown data format: `%s` (accepted: ['NHWC', 'NCHW'])" % data_format)
with tf.variable_scope(block_name):
net = layers.conv2d(
inputs,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act1'
)
net = layers.conv2d(
net,
n_channels=filters,
kernel_size=(3, 3),
strides=(1, 1),
padding='same',
data_format=data_format,
use_bias=True,
trainable=is_training,
kernel_initializer=conv2d_hparams.kernel_initializer,
bias_initializer=conv2d_hparams.bias_initializer,
)
net = blocks.activation_block(
inputs=net, act_fn=conv2d_hparams.activation_fn, trainable=is_training, block_name='act2'
)
outputs = layers.max_pooling2d(
inputs=net,
pool_size=(2, 2),
strides=(2, 2),
padding='valid',
data_format=data_format,
name="max_pooling2d"
)
return outputs, net
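# Illustrative usage sketch (not part of the original module). The hparams
# object below is an assumption about what the calling model builder provides;
# only the three attributes read above (kernel_initializer, bias_initializer,
# activation_fn) are required.
#
#     conv2d_hparams = tf.contrib.training.HParams(
#         kernel_initializer=tf.variance_scaling_initializer(),
#         bias_initializer=tf.zeros_initializer(),
#         activation_fn=tf.nn.relu,
#     )
#     images = tf.placeholder(tf.float32, [None, 1, 512, 512])  # NCHW input
#     pooled, skip = downsample_unet_block(
#         images, filters=32, data_format='NCHW', is_training=True,
#         conv2d_hparams=conv2d_hparams, block_name='downsample_block_1')
#     # `pooled` feeds the next encoder stage; `skip` is kept for the decoder.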
|
PyTorch/LanguageModeling/BERT/triton/dist6l/scripts/docker | docker | build | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
docker build -t bert . -f triton/Dockerfile
|
PaddlePaddle/LanguageModeling/BERT/scripts/configs | configs | squad_config | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
dgxa100-80g_8gpu_amp ()
{
init_checkpoint="results/bert-large-uncased/phase2/1563"
epochs="2"
batch_size="32"
learning_rate="4.6e-5"
warmup_proportion="0.2"
precision="amp"
num_gpu="8"
seed="1"
squad_dir="$BERT_PREP_WORKING_DIR/download/squad/v1.1"
vocab_file="vocab/bert-large-uncased-vocab.txt"
CODEDIR=/workspace/bert
OUT_DIR="$CODEDIR/results"
mode="train_eval"
CONFIG_FILE="bert_configs/bert-large-uncased.json"
max_steps="-1"
enable_benchmark="false"
benchmark_steps="100" # It takes effect only after the enable_benchmark is set to true
benchmark_warmup_steps="100" # It takes effect only after the enable_benchmark is set to true
echo $init_checkpoint $epochs $batch_size $learning_rate $warmup_proportion \
$precision $num_gpu $seed $squad_dir $vocab_file $OUT_DIR $mode $CONFIG_FILE \
$max_steps $enable_benchmark $benchmark_steps $benchmark_warmup_steps
}
dgxa100-80g_8gpu_tf32 ()
{
init_checkpoint="results/bert-large-uncased/phase2/1563"
epochs="2"
batch_size="32"
learning_rate="4.6e-5"
warmup_proportion="0.2"
precision="amp"
num_gpu="8"
seed="1"
squad_dir="$BERT_PREP_WORKING_DIR/download/squad/v1.1"
vocab_file="vocab/bert-large-uncased-vocab.txt"
CODEDIR=/workspace/bert
OUT_DIR="$CODEDIR/results"
mode="train_eval"
CONFIG_FILE="bert_configs/bert-large-uncased.json"
max_steps="-1"
enable_benchmark="false"
benchmark_steps="100" # It takes effect only after the enable_benchmark is set to true
benchmark_warmup_steps="100" # It takes effect only after the enable_benchmark is set to true
echo $init_checkpoint $epochs $batch_size $learning_rate $warmup_proportion \
$precision $num_gpu $seed $squad_dir $vocab_file $OUT_DIR $mode $CONFIG_FILE \
$max_steps $enable_benchmark $benchmark_steps $benchmark_warmup_steps
}
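# Hypothetical usage sketch (the launcher name below is an assumption, not part
# of this file): each config function only echoes its values, so a run script
# is expected to source this file and consume the echoed fields positionally,
# e.g.:
#   source scripts/configs/squad_config.sh
#   bash scripts/run_squad.sh $(dgxa100-80g_8gpu_amp)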
|
TensorFlow2/LanguageModeling/BERT | BERT | .gitignore | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Initially taken from Github's Python gitignore file
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
#Data
data/download
data/extracted
data/formatted_one_article_per_line
data/sharded
data/hdf5*
data/tfrecord*
data/*/*.zip
#Results
results/
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
.vscode/
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# TensorRT
*.engine
models/
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/model/models | models | fpn | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature Pyramid Network.
Feature Pyramid Networks were proposed in:
[1] Tsung-Yi Lin, Piotr Dollar, Ross Girshick, Kaiming He, Bharath Hariharan,
    and Serge Belongie.
    Feature Pyramid Networks for Object Detection. CVPR 2017.
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from mrcnn_tf2.ops import spatial_transform_ops
class FPNNetwork(tf.keras.models.Model):
def __init__(self, min_level=3, max_level=7, filters=256, trainable=True):
"""Generates multiple scale feature pyramid (FPN).
Args:
feats_bottom_up: a dictionary of tensor with level as keys and bottom up
feature tensors as values. They are the features to generate FPN features.
min_level: the minimum level number to generate FPN features.
max_level: the maximum level number to generate FPN features.
filters: the FPN filter size.
Returns:
feats: a dictionary of tensor with level as keys and the generated FPN
features as values.
"""
super().__init__(name="fpn", trainable=trainable)
self._local_layers = dict()
self._min_level = min_level
self._max_level = max_level
self._filters = filters
self._backbone_max_level = 5 # max(feats_bottom_up.keys())
self._upsample_max_level = (
self._backbone_max_level if self._max_level > self._backbone_max_level else self._max_level
)
self._local_layers["stage1"] = dict()
for level in range(self._min_level, self._upsample_max_level + 1):
self._local_layers["stage1"][str(level)] = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=(1, 1),
padding='same',
name=f'l{level}',
trainable=trainable
)
self._local_layers["stage2"] = dict()
# add post-hoc 3x3 convolution kernel
for level in range(self._min_level, self._upsample_max_level + 1):
self._local_layers["stage2"][str(level)] = tf.keras.layers.Conv2D(
filters=self._filters,
strides=(1, 1),
kernel_size=(3, 3),
padding='same',
name=f'post_hoc_d{level}',
trainable=trainable
)
self._local_layers["stage3_1"] = dict()
self._local_layers["stage3_2"] = dict()
if self._max_level == self._upsample_max_level + 1:
self._local_layers["stage3_1"] = tf.keras.layers.MaxPool2D(
pool_size=1,
strides=2,
padding='valid',
name='p%d' % self._max_level,
trainable=trainable
)
else:
for level in range(self._upsample_max_level + 1, self._max_level + 1):
self._local_layers["stage3_2"][str(level)] = tf.keras.layers.Conv2D(
filters=self._filters,
strides=(2, 2),
kernel_size=(3, 3),
padding='same',
name=f'p{level}',
trainable=trainable
)
def call(self, inputs, *args, **kwargs):
feats_bottom_up = inputs
# lateral connections
feats_lateral = {}
for level in range(self._min_level, self._upsample_max_level + 1):
feats_lateral[level] = self._local_layers["stage1"][str(level)](feats_bottom_up[level])
# add top-down path
feats = {self._upsample_max_level: feats_lateral[self._upsample_max_level]}
for level in range(self._upsample_max_level - 1, self._min_level - 1, -1):
feats[level] = spatial_transform_ops.nearest_upsampling(
feats[level + 1], 2
) + feats_lateral[level]
# add post-hoc 3x3 convolution kernel
for level in range(self._min_level, self._upsample_max_level + 1):
feats[level] = self._local_layers["stage2"][str(level)](feats[level])
if self._max_level == self._upsample_max_level + 1:
feats[self._max_level] = self._local_layers["stage3_1"](feats[self._max_level - 1])
else:
for level in range(self._upsample_max_level + 1, self._max_level + 1):
feats[level] = self._local_layers["stage3_2"][str(level)](feats[level - 1])
return feats
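# Illustrative sketch, not part of the original model code: shows the input
# structure FPNNetwork expects. The backbone dictionary below uses random NHWC
# tensors whose spatial size halves per level; the channel counts are arbitrary
# because the lateral 1x1 convolutions project everything to `filters`.
def _fpn_usage_example():
    fpn = FPNNetwork(min_level=3, max_level=7, filters=256)
    feats_bottom_up = {
        3: tf.random.normal([1, 32, 32, 512]),
        4: tf.random.normal([1, 16, 16, 1024]),
        5: tf.random.normal([1, 8, 8, 2048]),
    }
    # Returns a dict keyed by levels 3..7; every value has 256 channels.
    return fpn(feats_bottom_up)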
|
PyTorch/SpeechRecognition/Jasper/common/dali | dali | pipeline | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import multiprocessing
import numpy as np
import torch
import math
import itertools
class DaliPipeline():
def __init__(self, *,
train_pipeline: bool, # True if train pipeline, False if validation pipeline
device_id,
num_threads,
batch_size,
file_root: str,
file_list: str,
sample_rate,
discrete_resample_range: bool,
resample_range: list,
window_size,
window_stride,
nfeatures,
nfft,
frame_splicing_factor,
dither_coeff,
silence_threshold,
preemph_coeff,
pad_align,
max_duration,
mask_time_num_regions,
mask_time_min,
mask_time_max,
mask_freq_num_regions,
mask_freq_min,
mask_freq_max,
mask_both_num_regions,
mask_both_min_time,
mask_both_max_time,
mask_both_min_freq,
mask_both_max_freq,
preprocessing_device="gpu",
is_triton_pipeline=False):
self._dali_init_log(locals())
if torch.distributed.is_initialized():
shard_id = torch.distributed.get_rank()
n_shards = torch.distributed.get_world_size()
else:
shard_id = 0
n_shards = 1
self.preprocessing_device = preprocessing_device.lower()
assert self.preprocessing_device == "cpu" or self.preprocessing_device == "gpu", \
"Incorrect preprocessing device. Please choose either 'cpu' or 'gpu'"
self.frame_splicing_factor = frame_splicing_factor
# TODO(janton): Implement this
assert frame_splicing_factor == 1, "Frame splicing is not yet implemented"
self.resample_range = resample_range
self.discrete_resample_range = discrete_resample_range
self.train = train_pipeline
self.sample_rate = sample_rate
self.dither_coeff = dither_coeff
self.nfeatures = nfeatures
self.max_duration = max_duration
self.mask_params = {
'time_num_regions': mask_time_num_regions,
'time_min': mask_time_min,
'time_max': mask_time_max,
'freq_num_regions': mask_freq_num_regions,
'freq_min': mask_freq_min,
'freq_max': mask_freq_max,
'both_num_regions': mask_both_num_regions,
'both_min_time': mask_both_min_time,
'both_max_time': mask_both_max_time,
'both_min_freq': mask_both_min_freq,
'both_max_freq': mask_both_max_freq,
}
self.do_remove_silence = True if silence_threshold is not None else False
@dali.pipeline_def
def dali_jasper_pipe():
if is_triton_pipeline:
assert not self.train, "Pipeline for Triton shall be a validation pipeline"
if torch.distributed.is_initialized():
raise RuntimeError(
"You're creating Triton pipeline, using multi-process mode. Please use single-process mode.")
encoded, label = fn.external_source(device="cpu", name="DALI_INPUT_0", no_copy=True)
else:
encoded, label = fn.readers.file(device="cpu", name="file_reader",
file_root=file_root, file_list=file_list, shard_id=shard_id,
num_shards=n_shards, shuffle_after_epoch=train_pipeline)
speed_perturbation_coeffs = None
if resample_range is not None:
if discrete_resample_range:
values = [self.resample_range[0], 1.0, self.resample_range[1]]
speed_perturbation_coeffs = fn.random.uniform(device="cpu", values=values)
else:
speed_perturbation_coeffs = fn.random.uniform(device="cpu", range=resample_range)
if self.train and speed_perturbation_coeffs is not None:
dec_sample_rate_arg = speed_perturbation_coeffs * self.sample_rate
elif resample_range is None:
dec_sample_rate_arg = self.sample_rate
else:
dec_sample_rate_arg = None
audio, _ = fn.decoders.audio(encoded, sample_rate=dec_sample_rate_arg, dtype=types.FLOAT, downmix=True)
if self.do_remove_silence:
begin, length = fn.nonsilent_region(audio, cutoff_db=silence_threshold)
audio = fn.slice(audio, begin, length, axes=[0])
# Max duration drop is performed at DataLayer stage
if self.preprocessing_device == "gpu":
audio = audio.gpu()
if self.dither_coeff != 0.:
audio = audio + fn.random.normal(audio) * self.dither_coeff
audio = fn.preemphasis_filter(audio, preemph_coeff=preemph_coeff)
spec = fn.spectrogram(audio, nfft=nfft,
window_length=window_size * sample_rate, window_step=window_stride * sample_rate)
mel_spec = fn.mel_filter_bank(spec, sample_rate=sample_rate, nfilter=self.nfeatures, normalize=True)
log_features = fn.to_decibels(mel_spec, multiplier=np.log(10), reference=1.0, cutoff_db=math.log(1e-20))
log_features_len = fn.shapes(log_features)
if self.frame_splicing_factor != 1:
log_features_len = self._div_ceil(log_features_len, self.frame_splicing_factor)
log_features = fn.normalize(log_features, axes=[1])
log_features = fn.pad(log_features, axes=[1], fill_value=0, align=pad_align)
if self.train and self._do_spectrogram_masking():
anchors, shapes = fn.external_source(source=self._cutouts_generator, num_outputs=2, cycle=True)
log_features = fn.erase(log_features, anchor=anchors, shape=shapes, axes=[0, 1], fill_value=0,
normalized_anchor=True)
# When modifying DALI pipeline returns, make sure you update `output_map` in DALIGenericIterator invocation
return log_features.gpu(), label.gpu(), log_features_len.gpu()
self.pipe_handle = dali_jasper_pipe(batch_size=batch_size, num_threads=num_threads, device_id=device_id)
def get_pipeline(self):
return self.pipe_handle
@classmethod
def from_config(cls, train_pipeline: bool, device_id, batch_size, file_root: str, file_list: str, config_data: dict,
config_features: dict, device_type: str = "gpu", do_resampling: bool = True,
num_cpu_threads=multiprocessing.cpu_count()):
max_duration = config_data['max_duration']
sample_rate = config_data['sample_rate']
silence_threshold = -60 if config_data['trim_silence'] else None
        # TODO Take into account resampling probability
# TODO config_features['speed_perturbation']['p']
if do_resampling and config_data['speed_perturbation'] is not None:
resample_range = [config_data['speed_perturbation']['min_rate'],
config_data['speed_perturbation']['max_rate']]
discrete_resample_range = config_data['speed_perturbation']['discrete']
else:
resample_range = None
discrete_resample_range = False
window_size = config_features['window_size']
window_stride = config_features['window_stride']
nfeatures = config_features['n_filt']
nfft = config_features['n_fft']
frame_splicing_factor = config_features['frame_splicing']
dither_coeff = config_features['dither']
pad_align = config_features['pad_align']
pad_to_max_duration = config_features['pad_to_max_duration']
assert not pad_to_max_duration, "Padding to max duration currently not supported in DALI"
preemph_coeff = .97
config_spec = config_features['spec_augment']
if config_spec is not None:
mask_time_num_regions = config_spec['time_masks']
mask_time_min = config_spec['min_time']
mask_time_max = config_spec['max_time']
mask_freq_num_regions = config_spec['freq_masks']
mask_freq_min = config_spec['min_freq']
mask_freq_max = config_spec['max_freq']
else:
mask_time_num_regions = 0
mask_time_min = 0
mask_time_max = 0
mask_freq_num_regions = 0
mask_freq_min = 0
mask_freq_max = 0
config_cutout = config_features['cutout_augment']
if config_cutout is not None:
mask_both_num_regions = config_cutout['masks']
mask_both_min_time = config_cutout['min_time']
mask_both_max_time = config_cutout['max_time']
mask_both_min_freq = config_cutout['min_freq']
mask_both_max_freq = config_cutout['max_freq']
else:
mask_both_num_regions = 0
mask_both_min_time = 0
mask_both_max_time = 0
mask_both_min_freq = 0
mask_both_max_freq = 0
inst = cls(train_pipeline=train_pipeline,
device_id=device_id,
preprocessing_device=device_type,
num_threads=num_cpu_threads,
batch_size=batch_size,
file_root=file_root,
file_list=file_list,
sample_rate=sample_rate,
discrete_resample_range=discrete_resample_range,
resample_range=resample_range,
window_size=window_size,
window_stride=window_stride,
nfeatures=nfeatures,
nfft=nfft,
frame_splicing_factor=frame_splicing_factor,
dither_coeff=dither_coeff,
silence_threshold=silence_threshold,
preemph_coeff=preemph_coeff,
pad_align=pad_align,
max_duration=max_duration,
mask_time_num_regions=mask_time_num_regions,
mask_time_min=mask_time_min,
mask_time_max=mask_time_max,
mask_freq_num_regions=mask_freq_num_regions,
mask_freq_min=mask_freq_min,
mask_freq_max=mask_freq_max,
mask_both_num_regions=mask_both_num_regions,
mask_both_min_time=mask_both_min_time,
mask_both_max_time=mask_both_max_time,
mask_both_min_freq=mask_both_min_freq,
mask_both_max_freq=mask_both_max_freq)
return inst.get_pipeline()
@staticmethod
def _dali_init_log(args: dict):
if (not torch.distributed.is_initialized() or (
torch.distributed.is_initialized() and torch.distributed.get_rank() == 0)): # print once
max_len = max([len(ii) for ii in args.keys()])
fmt_string = '\t%' + str(max_len) + 's : %s'
print('Initializing DALI with parameters:')
for keyPair in sorted(args.items()):
print(fmt_string % keyPair)
@staticmethod
def _div_ceil(dividend, divisor):
return (dividend + (divisor - 1)) // divisor
def _do_spectrogram_masking(self):
return self.mask_params['time_num_regions'] > 0 or self.mask_params['freq_num_regions'] > 0 or \
self.mask_params['both_num_regions'] > 0
@staticmethod
def _interleave_lists(*lists):
"""
[*, **, ***], [1, 2, 3], [a, b, c] -> [*, 1, a, **, 2, b, ***, 3, c]
Returns:
iterator over interleaved list
"""
assert all((len(lists[0]) == len(test_l) for test_l in lists)), "All lists have to have the same length"
return itertools.chain(*zip(*lists))
def _generate_cutouts(self):
"""
Returns:
Generates anchors and shapes of the cutout regions.
Single call generates one batch of data.
The output shall be passed to DALI's Erase operator
anchors = [f0 t0 f1 t1 ...]
shapes = [f0w t0h f1w t1h ...]
"""
MAX_TIME_DIMENSION = 20 * 16000
freq_anchors = np.random.random(self.mask_params['freq_num_regions'])
time_anchors = np.random.random(self.mask_params['time_num_regions'])
both_anchors_freq = np.random.random(self.mask_params['both_num_regions'])
both_anchors_time = np.random.random(self.mask_params['both_num_regions'])
anchors = []
for anch in freq_anchors:
anchors.extend([anch, 0])
for anch in time_anchors:
anchors.extend([0, anch])
for t, f in zip(both_anchors_time, both_anchors_freq):
anchors.extend([f, t])
shapes = []
shapes.extend(
self._interleave_lists(
np.random.randint(self.mask_params['freq_min'], self.mask_params['freq_max'] + 1,
self.mask_params['freq_num_regions']),
                # XXX: Here, the time dimension of the spectrogram should be passed.
                # However, in DALI an ArgumentInput can't come from the GPU,
                # so we leave it to Erase (the masking operator) to handle the actual extent.
[int(MAX_TIME_DIMENSION)] * self.mask_params['freq_num_regions']
)
)
shapes.extend(
self._interleave_lists(
[self.nfeatures] * self.mask_params['time_num_regions'],
np.random.randint(self.mask_params['time_min'], self.mask_params['time_max'] + 1,
self.mask_params['time_num_regions'])
)
)
shapes.extend(
self._interleave_lists(
np.random.randint(self.mask_params['both_min_freq'], self.mask_params['both_max_freq'] + 1,
self.mask_params['both_num_regions']),
np.random.randint(self.mask_params['both_min_time'], self.mask_params['both_max_time'] + 1,
self.mask_params['both_num_regions'])
)
)
return anchors, shapes
def _cutouts_generator(self):
"""
Generator, that wraps cutouts creation in order to randomize inputs
and allow passing them to DALI's ExternalSource operator
"""
def tuples2list(tuples: list):
"""
[(a, b), (c, d)] -> [[a, c], [b, d]]
"""
return map(list, zip(*tuples))
[anchors, shapes] = tuples2list([self._generate_cutouts() for _ in range(self.pipe_handle.max_batch_size)])
yield np.array(anchors, dtype=np.float32), np.array(shapes, dtype=np.float32)
class DaliTritonPipeline(DaliPipeline):
def __init__(self, **kwargs):
kwargs['is_triton_pipeline'] = True
super().__init__(**kwargs)
def serialize_dali_triton_pipeline(output_path: str, config_data: dict, config_features: dict):
pipe = DaliTritonPipeline.from_config(train_pipeline=False, device_id=-1, batch_size=-1, file_root=None,
file_list=None, config_data=config_data, config_features=config_features,
do_resampling=False, num_cpu_threads=-1)
pipe.serialize(filename=output_path)
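# Illustrative sketch, not part of the original module: a minimal pair of config
# dictionaries accepted by `DaliPipeline.from_config`. The key names mirror the
# lookups above; the values and paths are placeholders, not the real Jasper
# configuration.
#
#   config_data = {'max_duration': 16.7, 'sample_rate': 16000,
#                  'trim_silence': True, 'speed_perturbation': None}
#   config_features = {'window_size': 0.02, 'window_stride': 0.01, 'n_filt': 64,
#                      'n_fft': 512, 'frame_splicing': 1, 'dither': 1e-5,
#                      'pad_align': 16, 'pad_to_max_duration': False,
#                      'spec_augment': None, 'cutout_augment': None}
#   pipeline = DaliPipeline.from_config(
#       train_pipeline=False, device_id=0, batch_size=16,
#       file_root='/datasets/LibriSpeech/', file_list='/datasets/dev-clean-wav.txt',
#       config_data=config_data, config_features=config_features)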
|
PyTorch/LanguageModeling/BERT/triton/dist4l/scripts/docker | docker | build | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
docker build -t bert . -f triton/Dockerfile
|
PyTorch/Recommendation/NCF | NCF | ncf | # Copyright (c) 2018, deepakn94, codyaustun, robieta. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.jit
from apex.optimizers import FusedAdam
import os
import math
import time
import numpy as np
from argparse import ArgumentParser
import torch
import torch.nn as nn
import utils
import dataloading
from neumf import NeuMF
from feature_spec import FeatureSpec
from neumf_constants import USER_CHANNEL_NAME, ITEM_CHANNEL_NAME, LABEL_CHANNEL_NAME
import dllogger
def synchronized_timestamp():
torch.cuda.synchronize()
return time.time()
def parse_args():
parser = ArgumentParser(description="Train a Neural Collaborative"
" Filtering model")
parser.add_argument('--data', type=str,
help='Path to the directory containing the feature specification yaml')
parser.add_argument('--feature_spec_file', type=str, default='feature_spec.yaml',
help='Name of the feature specification file or path relative to the data directory.')
parser.add_argument('-e', '--epochs', type=int, default=30,
help='Number of epochs for training')
parser.add_argument('-b', '--batch_size', type=int, default=2 ** 20,
help='Number of examples for each iteration. This will be divided by the number of devices')
parser.add_argument('--valid_batch_size', type=int, default=2 ** 20,
help='Number of examples in each validation chunk. This will be the maximum size of a batch '
'on each device.')
parser.add_argument('-f', '--factors', type=int, default=64,
help='Number of predictive factors')
parser.add_argument('--layers', nargs='+', type=int,
default=[256, 256, 128, 64],
help='Sizes of hidden layers for MLP')
parser.add_argument('-n', '--negative_samples', type=int, default=4,
help='Number of negative examples per interaction')
parser.add_argument('-l', '--learning_rate', type=float, default=0.0045,
help='Learning rate for optimizer')
parser.add_argument('-k', '--topk', type=int, default=10,
help='Rank for test examples to be considered a hit')
parser.add_argument('--seed', '-s', type=int, default=None,
help='Manually set random seed for torch')
parser.add_argument('--threshold', '-t', type=float, default=1.0,
help='Stop training early at threshold')
parser.add_argument('--beta1', '-b1', type=float, default=0.25,
help='Beta1 for Adam')
parser.add_argument('--beta2', '-b2', type=float, default=0.5,
                        help='Beta2 for Adam')
parser.add_argument('--eps', type=float, default=1e-8,
help='Epsilon for Adam')
parser.add_argument('--dropout', type=float, default=0.5,
                        help='Dropout probability; a value of 0 disables dropout entirely')
parser.add_argument('--checkpoint_dir', default='', type=str,
help='Path to the directory storing the checkpoint file, '
'passing an empty path disables checkpoint saving')
parser.add_argument('--load_checkpoint_path', default=None, type=str,
help='Path to the checkpoint file to be loaded before training/evaluation')
parser.add_argument('--mode', choices=['train', 'test'], default='train', type=str,
help='Passing "test" will only run a single evaluation; '
'otherwise, full training will be performed')
parser.add_argument('--grads_accumulated', default=1, type=int,
help='Number of gradients to accumulate before performing an optimization step')
parser.add_argument('--amp', action='store_true', help='Enable mixed precision training')
parser.add_argument('--log_path', default='log.json', type=str,
help='Path for the JSON training log')
return parser.parse_args()
def init_distributed(args):
args.world_size = int(os.environ.get('WORLD_SIZE', default=1))
args.distributed = args.world_size > 1
if args.distributed:
args.local_rank = int(os.environ['LOCAL_RANK'])
'''
Set cuda device so everything is done on the right GPU.
THIS MUST BE DONE AS SOON AS POSSIBLE.
'''
torch.cuda.set_device(args.local_rank)
'''Initialize distributed communication'''
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
else:
args.local_rank = 0
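    # Note: under a distributed launcher (e.g. torchrun) WORLD_SIZE and LOCAL_RANK are set
    # per worker by the launcher; plain single-process runs fall back to local_rank 0 above.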
def val_epoch(model, dataloader: dataloading.TestDataLoader, k, distributed=False, world_size=1):
model.eval()
user_feature_name = dataloader.channel_spec[USER_CHANNEL_NAME][0]
item_feature_name = dataloader.channel_spec[ITEM_CHANNEL_NAME][0]
label_feature_name = dataloader.channel_spec[LABEL_CHANNEL_NAME][0]
with torch.no_grad():
p = []
labels_list = []
losses = []
for batch_dict in dataloader.get_epoch_data():
user_batch = batch_dict[USER_CHANNEL_NAME][user_feature_name]
item_batch = batch_dict[ITEM_CHANNEL_NAME][item_feature_name]
label_batch = batch_dict[LABEL_CHANNEL_NAME][label_feature_name]
prediction_batch = model(user_batch, item_batch, sigmoid=True).detach()
loss_batch = torch.nn.functional.binary_cross_entropy(input=prediction_batch.reshape([-1]),
target=label_batch)
losses.append(loss_batch)
p.append(prediction_batch)
labels_list.append(label_batch)
ignore_mask = dataloader.get_ignore_mask().view(-1, dataloader.samples_in_series)
ratings = torch.cat(p).view(-1, dataloader.samples_in_series)
ratings[ignore_mask] = -1
labels = torch.cat(labels_list).view(-1, dataloader.samples_in_series)
del p, labels_list
top_indices = torch.topk(ratings, k)[1]
# Positive items are always first in a given series
labels_of_selected = torch.gather(labels, 1, top_indices)
ifzero = (labels_of_selected == 1)
hits = ifzero.sum()
ndcg = (math.log(2) / (torch.nonzero(ifzero)[:, 1].view(-1).to(torch.float) + 2).log_()).sum()
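        # For the single relevant item at zero-based rank r within the top-k list, NDCG is
        # 1/log2(r + 2); the expression above computes the equivalent log(2)/log(r + 2).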
total_validation_loss = torch.mean(torch.stack(losses, dim=0))
# torch.nonzero may cause host-device synchronization
if distributed:
torch.distributed.all_reduce(hits, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce(ndcg, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce(total_validation_loss, op=torch.distributed.ReduceOp.SUM)
total_validation_loss = total_validation_loss / world_size
num_test_cases = dataloader.raw_dataset_length / dataloader.samples_in_series
hr = hits.item() / num_test_cases
ndcg = ndcg.item() / num_test_cases
model.train()
return hr, ndcg, total_validation_loss
def main():
args = parse_args()
init_distributed(args)
if args.local_rank == 0:
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE,
filename=args.log_path),
dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE)])
else:
dllogger.init(backends=[])
dllogger.metadata('train_throughput', {"name": 'train_throughput', 'unit': 'samples/s', 'format': ":.3e"})
dllogger.metadata('best_train_throughput', {'unit': 'samples/s'})
dllogger.metadata('mean_train_throughput', {'unit': 'samples/s'})
dllogger.metadata('eval_throughput', {"name": 'eval_throughput', 'unit': 'samples/s', 'format': ":.3e"})
dllogger.metadata('best_eval_throughput', {'unit': 'samples/s'})
dllogger.metadata('mean_eval_throughput', {'unit': 'samples/s'})
dllogger.metadata('train_epoch_time', {"name": 'train_epoch_time', 'unit': 's', 'format': ":.3f"})
dllogger.metadata('validation_epoch_time', {"name": 'validation_epoch_time', 'unit': 's', 'format': ":.3f"})
dllogger.metadata('time_to_target', {'unit': 's'})
dllogger.metadata('time_to_best_model', {'unit': 's'})
dllogger.metadata('hr@10', {"name": 'hr@10', 'unit': None, 'format': ":.5f"})
dllogger.metadata('best_accuracy', {'unit': None})
dllogger.metadata('best_epoch', {'unit': None})
dllogger.metadata('validation_loss', {"name": 'validation_loss', 'unit': None, 'format': ":.5f"})
dllogger.metadata('train_loss', {"name": 'train_loss', 'unit': None, 'format': ":.5f"})
dllogger.log(data=vars(args), step='PARAMETER')
if args.seed is not None:
torch.manual_seed(args.seed)
if not os.path.exists(args.checkpoint_dir) and args.checkpoint_dir:
print("Saving results to {}".format(args.checkpoint_dir))
os.makedirs(args.checkpoint_dir, exist_ok=True)
# sync workers before timing
if args.distributed:
torch.distributed.broadcast(torch.tensor([1], device="cuda"), 0)
torch.cuda.synchronize()
main_start_time = synchronized_timestamp()
feature_spec_path = os.path.join(args.data, args.feature_spec_file)
feature_spec = FeatureSpec.from_yaml(feature_spec_path)
trainset = dataloading.TorchTensorDataset(feature_spec, mapping_name='train', args=args)
testset = dataloading.TorchTensorDataset(feature_spec, mapping_name='test', args=args)
train_loader = dataloading.TrainDataloader(trainset, args)
test_loader = dataloading.TestDataLoader(testset, args)
# make pytorch memory behavior more consistent later
torch.cuda.empty_cache()
# Create model
user_feature_name = feature_spec.channel_spec[USER_CHANNEL_NAME][0]
item_feature_name = feature_spec.channel_spec[ITEM_CHANNEL_NAME][0]
label_feature_name = feature_spec.channel_spec[LABEL_CHANNEL_NAME][0]
model = NeuMF(nb_users=feature_spec.feature_spec[user_feature_name]['cardinality'],
nb_items=feature_spec.feature_spec[item_feature_name]['cardinality'],
mf_dim=args.factors,
mlp_layer_sizes=args.layers,
dropout=args.dropout)
optimizer = FusedAdam(model.parameters(), lr=args.learning_rate,
betas=(args.beta1, args.beta2), eps=args.eps)
criterion = nn.BCEWithLogitsLoss(reduction='none') # use torch.mean() with dim later to avoid copy to host
# Move model and loss to GPU
model = model.cuda()
criterion = criterion.cuda()
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model)
local_batch = args.batch_size // args.world_size
traced_criterion = torch.jit.trace(criterion.forward,
(torch.rand(local_batch, 1), torch.rand(local_batch, 1)))
print(model)
print("{} parameters".format(utils.count_parameters(model)))
if args.load_checkpoint_path:
state_dict = torch.load(args.load_checkpoint_path)
state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}
model.load_state_dict(state_dict)
if args.mode == 'test':
start = synchronized_timestamp()
hr, ndcg, val_loss = val_epoch(model, test_loader, args.topk,
distributed=args.distributed, world_size=args.world_size)
val_time = synchronized_timestamp() - start
eval_size = test_loader.raw_dataset_length
eval_throughput = eval_size / val_time
dllogger.log(step=tuple(), data={'best_eval_throughput': eval_throughput,
'hr@10': hr,
'validation_loss': float(val_loss.item())})
return
# this should always be overridden if hr>0.
# It is theoretically possible for the hit rate to be zero in the first epoch, which would result in referring
# to an uninitialized variable.
max_hr = 0
best_epoch = 0
best_model_timestamp = synchronized_timestamp()
train_throughputs, eval_throughputs = [], []
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
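    # When --amp is not set, GradScaler(enabled=False) turns scale/step/update into
    # pass-throughs, so the same loop below handles both FP32 and mixed precision.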
for epoch in range(args.epochs):
begin = synchronized_timestamp()
batch_dict_list = train_loader.get_epoch_data()
num_batches = len(batch_dict_list)
for i in range(num_batches // args.grads_accumulated):
for j in range(args.grads_accumulated):
batch_idx = (args.grads_accumulated * i) + j
batch_dict = batch_dict_list[batch_idx]
user_features = batch_dict[USER_CHANNEL_NAME]
item_features = batch_dict[ITEM_CHANNEL_NAME]
user_batch = user_features[user_feature_name]
item_batch = item_features[item_feature_name]
label_features = batch_dict[LABEL_CHANNEL_NAME]
label_batch = label_features[label_feature_name]
with torch.cuda.amp.autocast(enabled=args.amp):
outputs = model(user_batch, item_batch)
loss = traced_criterion(outputs, label_batch.view(-1, 1))
loss = torch.mean(loss.float().view(-1), 0)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
for p in model.parameters():
p.grad = None
del batch_dict_list
train_time = synchronized_timestamp() - begin
begin = synchronized_timestamp()
epoch_samples = train_loader.length_after_augmentation
train_throughput = epoch_samples / train_time
train_throughputs.append(train_throughput)
hr, ndcg, val_loss = val_epoch(model, test_loader, args.topk,
distributed=args.distributed, world_size=args.world_size)
val_time = synchronized_timestamp() - begin
eval_size = test_loader.raw_dataset_length
eval_throughput = eval_size / val_time
eval_throughputs.append(eval_throughput)
if args.distributed:
torch.distributed.all_reduce(loss, op=torch.distributed.ReduceOp.SUM)
loss = loss / args.world_size
dllogger.log(step=(epoch,),
data={'train_throughput': train_throughput,
'hr@10': hr,
'train_epoch_time': train_time,
'validation_epoch_time': val_time,
'eval_throughput': eval_throughput,
'validation_loss': float(val_loss.item()),
'train_loss': float(loss.item())})
if hr > max_hr and args.local_rank == 0:
max_hr = hr
best_epoch = epoch
print("New best hr!")
if args.checkpoint_dir:
save_checkpoint_path = os.path.join(args.checkpoint_dir, 'model.pth')
print("Saving the model to: ", save_checkpoint_path)
torch.save(model.state_dict(), save_checkpoint_path)
best_model_timestamp = synchronized_timestamp()
if args.threshold is not None:
if hr >= args.threshold:
print("Hit threshold of {}".format(args.threshold))
break
if args.local_rank == 0:
dllogger.log(data={'best_train_throughput': max(train_throughputs),
'best_eval_throughput': max(eval_throughputs),
'mean_train_throughput': np.mean(train_throughputs),
'mean_eval_throughput': np.mean(eval_throughputs),
'best_accuracy': max_hr,
'best_epoch': best_epoch,
'time_to_target': synchronized_timestamp() - main_start_time,
'time_to_best_model': best_model_timestamp - main_start_time,
'validation_loss': float(val_loss.item()),
'train_loss': float(loss.item())},
step=tuple())
if __name__ == '__main__':
main()
|
TensorFlow2/Segmentation/MaskRCNN/scripts | scripts | benchmark_training | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script that simplifies running training benchmark """
import argparse
import os
import shutil
import subprocess
from pathlib import Path
LOCK_FILE = Path('/tmp/mrcnn_tf2.lock')
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
pass
if __name__ == '__main__':
# CLI flags
# noinspection PyTypeChecker
parser = argparse.ArgumentParser(
description=(
'NVIDIA MaskRCNN TF2 train benchmark'
'\n\nNote: Any additional flags not specified below will be passed to main.py'
),
formatter_class=lambda prog: CustomFormatter(prog, max_help_position=100)
)
parser.add_argument('--gpus', type=int, metavar='N',
                        help='Number of GPUs. Defaults to all available')
parser.add_argument('--batch_size', type=int, required=True,
help='Batch size used during training')
parser.add_argument('--amp', action='store_true',
help='Enable automatic mixed precision')
parser.add_argument('--no_xla', action='store_true',
help='Disables XLA - accelerated linear algebra')
parser.add_argument('--data_dir', type=str, metavar='DIR', default='/data',
help='Input directory containing the dataset')
parser.add_argument('--weights_dir', type=str, metavar='DIR', default='/weights',
help='Directory containing pre-trained resnet weights')
parser.add_argument('--slurm_lock', action='store_true',
help='Prevent this script from being launched multiple times when used in multi-gpu slurm setup')
flags, remainder = parser.parse_known_args()
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../main.py'))
checkpoint_path = os.path.join(flags.weights_dir, "rn50_tf_amp_ckpt_v20.06.0/nvidia_rn50_tf_amp")
# build command
cmd = (
f'python {main_path}'
f' train'
f' --data_dir "{flags.data_dir}"'
f' --backbone_checkpoint "{checkpoint_path}"'
f' --epochs 1'
f' --steps_per_epoch 200'
f' --log_every 10'
f' --train_batch_size {flags.batch_size}'
)
if not flags.no_xla:
cmd += ' --xla'
if flags.amp:
cmd += ' --amp'
if remainder:
cmd += ' ' + ' '.join(remainder)
if flags.gpus is not None:
cmd = f'CUDA_VISIBLE_DEVICES={",".join(map(str, range(flags.gpus)))} ' + cmd
# print command
line = '-' * shutil.get_terminal_size()[0]
print(line, cmd, line, sep='\n', flush=True)
# acquire lock if --slurm_lock is provided
try:
flags.slurm_lock and LOCK_FILE.touch(exist_ok=False)
except FileExistsError:
print(f'Failed to acquire lock ({LOCK_FILE}) - skipping')
exit(0)
# run model
code = subprocess.call(cmd, shell=True)
flags.slurm_lock and LOCK_FILE.unlink()
exit(code)
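    # Example invocation (hypothetical values for GPU count and batch size):
    #   python scripts/benchmark_training.py --gpus 8 --batch_size 4 --amp --data_dir /data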
|
TensorFlow/Detection/SSD/models/research/slim | slim | download_and_convert_data | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts a particular dataset.
Usage:
```shell
$ python download_and_convert_data.py \
--dataset_name=mnist \
--dataset_dir=/tmp/mnist
$ python download_and_convert_data.py \
--dataset_name=cifar10 \
--dataset_dir=/tmp/cifar10
$ python download_and_convert_data.py \
--dataset_name=flowers \
--dataset_dir=/tmp/flowers
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from datasets import download_and_convert_cifar10
from datasets import download_and_convert_flowers
from datasets import download_and_convert_mnist
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string(
'dataset_name',
None,
'The name of the dataset to convert, one of "cifar10", "flowers", "mnist".')
tf.app.flags.DEFINE_string(
'dataset_dir',
None,
'The directory where the output TFRecords and temporary files are saved.')
def main(_):
if not FLAGS.dataset_name:
raise ValueError('You must supply the dataset name with --dataset_name')
if not FLAGS.dataset_dir:
raise ValueError('You must supply the dataset directory with --dataset_dir')
if FLAGS.dataset_name == 'cifar10':
download_and_convert_cifar10.run(FLAGS.dataset_dir)
elif FLAGS.dataset_name == 'flowers':
download_and_convert_flowers.run(FLAGS.dataset_dir)
elif FLAGS.dataset_name == 'mnist':
download_and_convert_mnist.run(FLAGS.dataset_dir)
else:
raise ValueError(
'dataset_name [%s] was not recognized.' % FLAGS.dataset_name)
if __name__ == '__main__':
tf.app.run()
|
PyTorch/Classification/ConvNets/resnet50v1.5/training/AMP | AMP | DGX2V_resnet50_AMP_90E | python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGX2V /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
|
TensorFlow/Recommendation/WideAndDeep/utils | utils | dataloader | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.compat.v1 import logging
def separate_input_fn(
tf_transform_output,
transformed_examples,
create_batches,
mode,
reader_num_threads=1,
parser_num_threads=2,
shuffle_buffer_size=10,
prefetch_buffer_size=1,
print_display_ids=False):
"""
    A version of the training + eval input function built on tf.data dataset operations,
    which makes it more straightforward to tweak.
"""
logging.warn('Shuffle buffer size: {}'.format(shuffle_buffer_size))
filenames_dataset = tf.data.Dataset.list_files(
transformed_examples,
shuffle=False
)
raw_dataset = tf.data.TFRecordDataset(
filenames_dataset,
num_parallel_reads=reader_num_threads
)
if mode == tf.estimator.ModeKeys.TRAIN and shuffle_buffer_size > 1:
raw_dataset = raw_dataset.shuffle(shuffle_buffer_size)
raw_dataset = raw_dataset.repeat()
raw_dataset = raw_dataset.batch(create_batches)
    # parse_example_dataset appears to require each element to be a vector of serialized
    # examples; the batching above should guarantee that this is always the case.
    # One possible alternative for any problematic case is tf.io.parse_single_example.
parsed_dataset = raw_dataset.apply(
tf.data.experimental.parse_example_dataset(
tf_transform_output.transformed_feature_spec(),
num_parallel_calls=parser_num_threads
)
)
    # A function mapped over each dataset element: it separates the label, reshapes every
    # feature to two dimensions (batch size, elements per record), and, when
    # print_display_ids is set, injects tf.Print ops for debugging.
def consolidate_batch(elem):
label = elem.pop('label')
reshaped_label = tf.reshape(label, [-1, label.shape[-1]])
reshaped_elem = {
key: tf.reshape(elem[key], [-1, elem[key].shape[-1]])
for key in elem
}
if print_display_ids:
elem['ad_id'] = tf.Print(input_=elem['ad_id'],
data=[tf.reshape(elem['display_id'], [-1])],
message='display_id', name='print_display_ids',
summarize=elem['ad_id'].shape[1])
elem['ad_id'] = tf.Print(input_=elem['ad_id'],
data=[tf.reshape(elem['ad_id'], [-1])],
message='ad_id', name='print_ad_ids',
summarize=elem['ad_id'].shape[1])
elem['ad_id'] = tf.Print(input_=elem['ad_id'],
data=[tf.reshape(elem['is_leak'], [-1])],
message='is_leak', name='print_is_leak',
summarize=elem['ad_id'].shape[1])
return reshaped_elem, reshaped_label
if mode == tf.estimator.ModeKeys.EVAL:
parsed_dataset = parsed_dataset.map(
consolidate_batch,
num_parallel_calls=None
)
else:
parsed_dataset = parsed_dataset.map(
consolidate_batch,
num_parallel_calls=parser_num_threads
)
parsed_dataset = parsed_dataset.prefetch(prefetch_buffer_size)
return parsed_dataset
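# A minimal usage sketch (not part of the original module), assuming the caller already has
# a TFTransformOutput and a file pattern for the transformed TFRecords; both argument names
# below are hypothetical placeholders.
def example_train_input_fn(tf_transform_output, transformed_examples_pattern, batch_size):
    """Wrap separate_input_fn for use as a tf.estimator training input_fn."""
    return separate_input_fn(
        tf_transform_output,
        transformed_examples_pattern,
        create_batches=batch_size,
        mode=tf.estimator.ModeKeys.TRAIN,
        shuffle_buffer_size=10)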
|
TensorFlow2/LanguageModeling/BERT/data | data | create_biobert_datasets_from_start | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export BERT_PREP_WORKING_DIR="${BERT_PREP_WORKING_DIR}"
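# BERT_PREP_WORKING_DIR must already be set in the calling environment (for example to the
# data directory inside the container); the export above only propagates it to the steps below.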
# Download
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action download --dataset pubmed_baseline
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action download --dataset google_pretrained_weights # Includes vocab
# Properly format the text files
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action text_formatting --dataset pubmed_baseline
# Shard the text files
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action sharding --dataset pubmed_baseline
### BERT BASE
## UNCASED
# Create TFRecord files Phase 1
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action create_tfrecord_files --dataset pubmed_baseline --max_seq_length 128 \
--max_predictions_per_seq 20 --vocab_file ${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/uncased_L-12_H-768_A-12/vocab.txt
# Create TFRecord files Phase 2
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action create_tfrecord_files --dataset pubmed_baseline --max_seq_length 512 \
--max_predictions_per_seq 80 --vocab_file ${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/uncased_L-12_H-768_A-12/vocab.txt
## CASED
# Create TFRecord files Phase 1
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action create_tfrecord_files --dataset pubmed_baseline --max_seq_length 128 \
--max_predictions_per_seq 20 --vocab_file ${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/cased_L-12_H-768_A-12/vocab.txt \
--do_lower_case=0
# Create TFRecord files Phase 2
python3 ${BERT_PREP_WORKING_DIR}/bertPrep.py --action create_tfrecord_files --dataset pubmed_baseline --max_seq_length 512 \
--max_predictions_per_seq 80 --vocab_file ${BERT_PREP_WORKING_DIR}/download/google_pretrained_weights/cased_L-12_H-768_A-12/vocab.txt \
--do_lower_case=0
|
PyTorch/Classification/GPUNet/triton/runner | runner | __init__ | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton | triton | check_accuracy | #!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
from pathlib import Path
from tqdm import tqdm
import numpy as np
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseRunner,
Model,
load_from_file,
)
from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file
from .model import get_model
LOGGER = logging.getLogger("check_accuracy")
def _get_args():
parser = argparse.ArgumentParser(
description="Script for checking accuracy of export and conversion.", allow_abbrev=False
)
parser.add_argument("--native-model", help="Path to native model", required=True)
parser.add_argument("--native-type", help="Native model type", required=True)
parser.add_argument("--export-model", help="Path to exported model", required=True)
parser.add_argument("--export-type", help="Exported model type", required=True)
parser.add_argument("--convert-model", help="Path to converted model", required=True)
parser.add_argument("--convert-type", help="Converted model type", required=True)
parser.add_argument("--dataloader", help="Path to python module containing data loader", required=True)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.native_type)
ArgParserGenerator(Loader, module_path=args.native_model).update_argparser(parser)
Runner: BaseRunner = runners.get(args.native_type)
ArgParserGenerator(Runner).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.export_type)
ArgParserGenerator(Loader, module_path=args.export_model).update_argparser(parser)
Runner: BaseRunner = runners.get(args.export_type)
ArgParserGenerator(Runner).update_argparser(parser)
if args.convert_type != 'trt':
Loader: BaseLoader = loaders.get(args.convert_type)
ArgParserGenerator(Loader, module_path=args.convert_model).update_argparser(parser)
Runner: BaseRunner = runners.get(args.convert_type)
ArgParserGenerator(Runner).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
LOGGER.info(f"Loading {args.native_model}")
Runner: BaseRunner = runners.get(args.native_type)
runner_native = ArgParserGenerator(Runner).from_args(args)
    model_native, _ = get_model(model_dir=args.native_model)
model_native = Model(handle=model_native, precision=None, inputs=None, outputs=['target__0'])
LOGGER.info(f"Loading {args.export_model}")
Loader: BaseLoader = loaders.get(args.export_type)
Runner: BaseRunner = runners.get(args.export_type)
loader = ArgParserGenerator(Loader, module_path=args.export_model).from_args(args)
runner_export = ArgParserGenerator(Runner).from_args(args)
model_export = loader.load(args.export_model)
if args.convert_type != 'trt':
LOGGER.info(f"Loading {args.convert_model}")
Loader: BaseLoader = loaders.get(args.convert_type)
Runner: BaseRunner = runners.get(args.convert_type)
loader = ArgParserGenerator(Loader, module_path=args.convert_model).from_args(args)
runner_convert = ArgParserGenerator(Runner).from_args(args)
model_convert = loader.load(args.convert_model)
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
ids, x, y_real = next(dataloader_fn())
with runner_native.init_inference(model=model_native) as runner_session:
y_pred_native = runner_session(x)
del model_native
del runner_native
with runner_export.init_inference(model=model_export) as runner_session:
y_pred_export = runner_session(x)
del model_export
del runner_export
e1 = [np.linalg.norm(y_pred_native[k]-y_pred_export[k]) for k in y_pred_native.keys()]
assert all([i < 1e-3 for i in e1]), "Error between native and export is {}, limit is 1e-3".format(e1)
if args.convert_type != 'trt':
with runner_convert.init_inference(model=model_convert) as runner_session:
y_pred_convert = runner_session(x)
e2 = [np.linalg.norm(y_pred_convert[k]-y_pred_export[k]) for k in y_pred_native.keys()]
assert all([i < 1e-3 for i in e2]), "Error between export and convert is {}, limit is 1e-3".format(e2)
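        # The comparisons are pairwise: native vs. exported above, exported vs. converted here;
        # TensorRT conversions are skipped because this script does not load TRT engines.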
if __name__ == "__main__":
main()
|
PyTorch/Segmentation/MaskRCNN/pytorch | pytorch | requirements | mlperf-compliance==0.0.10
opencv-python==4.4.0.42
yacs
git+https://github.com/NVIDIA/cocoapi.git@nvidia/master#egg=cocoapi&subdirectory=PythonAPI
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | faster_rcnn_resnet101_pets | # Faster R-CNN with Resnet-101 (v1) configured for the Oxford-IIIT Pet Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
model {
faster_rcnn {
num_classes: 37
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_resnet101'
first_stage_features_stride: 16
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
}
}
train_config: {
batch_size: 1
optimizer {
momentum_optimizer: {
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 0.0003
schedule {
step: 900000
learning_rate: .00003
}
schedule {
step: 1200000
learning_rate: .000003
}
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
from_detection_checkpoint: true
load_all_detection_checkpoint_vars: true
# Note: The below line limits the training process to 200K steps, which we
  # empirically found to be sufficient to train the pets dataset. This
# effectively bypasses the learning rate schedule (the learning rate will
# never decay). Remove the below line to train indefinitely.
num_steps: 200000
data_augmentation_options {
random_horizontal_flip {
}
}
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/pet_faces_train.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
num_examples: 1101
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/pet_faces_val.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | decoderBuilderPlain | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "decoderBuilderPlain.h"
#include "attentionLayerCreator.h"
#include "decoderInstance.h"
#include "dims5.h"
#include "engineCache.h"
#include "lstm.h"
#include "utils.h"
#include <stdexcept>
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int NUM_PRENET_LAYERS = 2;
constexpr const char* const INPUT_MASK_NAME = DecoderInstance::INPUT_MASK_NAME;
constexpr const char* const INPUT_LENGTH_NAME = DecoderInstance::INPUT_LENGTH_NAME;
constexpr const char* const INPUT_DROPOUT_NAME = DecoderInstance::INPUT_DROPOUT_NAME;
constexpr const char* const INPUT_LASTFRAME_NAME = DecoderInstance::INPUT_LASTFRAME_NAME;
constexpr const char* const INPUT_MEMORY_NAME = DecoderInstance::INPUT_MEMORY_NAME;
constexpr const char* const INPUT_PROCESSED_NAME = DecoderInstance::INPUT_PROCESSED_NAME;
constexpr const char* const INPUT_WEIGHTS_NAME = DecoderInstance::INPUT_WEIGHTS_NAME;
constexpr const char* const INPUT_CONTEXT_NAME = DecoderInstance::INPUT_CONTEXT_NAME;
constexpr const char* const INPUT_ATTENTIONHIDDEN_NAME = DecoderInstance::INPUT_ATTENTIONHIDDEN_NAME;
constexpr const char* const INPUT_ATTENTIONCELL_NAME = DecoderInstance::INPUT_ATTENTIONCELL_NAME;
constexpr const char* const INPUT_DECODERHIDDEN_NAME = DecoderInstance::INPUT_DECODERHIDDEN_NAME;
constexpr const char* const INPUT_DECODERCELL_NAME = DecoderInstance::INPUT_DECODERCELL_NAME;
constexpr const char* const OUTPUT_ATTENTIONHIDDEN_NAME = DecoderInstance::OUTPUT_ATTENTIONHIDDEN_NAME;
constexpr const char* const OUTPUT_ATTENTIONCELL_NAME = DecoderInstance::OUTPUT_ATTENTIONCELL_NAME;
constexpr const char* const OUTPUT_CONTEXT_NAME = DecoderInstance::OUTPUT_CONTEXT_NAME;
constexpr const char* const OUTPUT_WEIGHTS_NAME = DecoderInstance::OUTPUT_WEIGHTS_NAME;
constexpr const char* const OUTPUT_DECODERHIDDEN_NAME = DecoderInstance::OUTPUT_DECODERHIDDEN_NAME;
constexpr const char* const OUTPUT_DECODERCELL_NAME = DecoderInstance::OUTPUT_DECODERCELL_NAME;
constexpr const char* const OUTPUT_CHANNELS_NAME = DecoderInstance::OUTPUT_CHANNELS_NAME;
constexpr const char* const OUTPUT_GATE_NAME = DecoderInstance::OUTPUT_GATE_NAME;
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
DecoderBuilderPlain::DecoderBuilderPlain(const int inputLength, const int numDim, const int numChannels)
: mInputLength(inputLength)
, mNumEncodingDim(numDim)
, mNumPrenetDim(256)
, mNumAttentionRNNDim(1024)
, mNumAttentionDim(128)
, mNumAttentionFilters(32)
, mAttentionKernelSize(31)
, mNumLSTMDim(1024)
, mNumChannels(numChannels)
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
TRTPtr<ICudaEngine> DecoderBuilderPlain::build(
IBuilder& builder,
IModelImporter& importer,
const int maxBatchSize,
const bool useFP16)
{
TRTPtr<INetworkDefinition> network(builder.createNetworkV2(0));
network->setName("Tacotron2_DecoderWithoutPlugins");
// PRENET ///////////////////////////////////////////////////////////////////
ITensor* prenetInput = network->addInput(
INPUT_LASTFRAME_NAME, DataType::kFLOAT, Dims4{1, mNumChannels + 1, 1, 1});
ITensor* dropoutInput = network->addInput(
INPUT_DROPOUT_NAME, DataType::kFLOAT, Dims4{1, mNumPrenetDim, 1, 1});
ISliceLayer* inputSlice = network->addSlice(
*prenetInput,
Dims4(0, 0, 0, 0),
Dims4(1, mNumChannels, 1, 1),
Dims4(1, 1, 1, 1));
inputSlice->setName("decoder.frame_slice");
prenetInput = inputSlice->getOutput(0);
for (int layer = 0; layer < NUM_PRENET_LAYERS; ++layer) {
const LayerData* const linearData = importer.getWeights(
{"decoder", "prenet", "layers", std::to_string(layer), "linear_layer"});
ILayer* const linearLayer = network->addFullyConnected(
*prenetInput,
mNumPrenetDim,
linearData->get("weight"),
Weights{DataType::kFLOAT, nullptr, 0});
linearLayer->setName(
std::string(
"decoder.prenet.layers." + std::to_string(layer) + ".linear_layer")
.c_str());
ILayer* const reluLayer = network->addActivation(
*linearLayer->getOutput(0), ActivationType::kRELU);
reluLayer->setName(
std::string("decoder.prenet.layers." + std::to_string(layer) + ".relu")
.c_str());
IElementWiseLayer* const elemLayer = network->addElementWise(
*reluLayer->getOutput(0), *dropoutInput, ElementWiseOperation::kPROD);
elemLayer->setName(
std::string(
"decoder.prenet.layers." + std::to_string(layer) + ".dropout")
.c_str());
prenetInput = elemLayer->getOutput(0);
}
ITensor* const prenetOutput = prenetInput;
// ATTENTION LSTM ///////////////////////////////////////////////////////////
ITensor* const attentionContextInput
= network->addInput(INPUT_CONTEXT_NAME, DataType::kFLOAT, Dims3{1, 1, mNumEncodingDim});
ITensor* const attentionRNNHidden
= network->addInput(INPUT_ATTENTIONHIDDEN_NAME, DataType::kFLOAT, Dims3{1, 1, mNumAttentionRNNDim});
ITensor* const attentionRNNCell
= network->addInput(INPUT_ATTENTIONCELL_NAME, DataType::kFLOAT, Dims3{1, 1, mNumAttentionRNNDim});
const LayerData* const lstmData = importer.getWeights({"decoder", "attention_rnn"});
IShuffleLayer* const prenetShuffle = network->addShuffle(*prenetOutput);
prenetShuffle->setReshapeDimensions(Dims3{1, 1, -1});
std::array<ITensor*, 2> lstmInputs{prenetShuffle->getOutput(0), attentionContextInput};
IConcatenationLayer* lstmConcatLayer
= network->addConcatenation(lstmInputs.data(), static_cast<int>(lstmInputs.size()));
lstmConcatLayer->setAxis(2);
lstmConcatLayer->setName("decoder.attention_rnn.concat");
ILayer* attentionLSTMLayer = LSTM::addUnidirectionalCell(network.get(), lstmConcatLayer->getOutput(0),
attentionRNNHidden, attentionRNNCell, mNumAttentionRNNDim, *lstmData);
ITensor* const attentionHiddenOut = attentionLSTMLayer->getOutput(1);
ITensor* const attentionCellOut = attentionLSTMLayer->getOutput(2);
attentionLSTMLayer->setName("decoder.attention_rnn");
attentionHiddenOut->setName(OUTPUT_ATTENTIONHIDDEN_NAME);
network->markOutput(*attentionHiddenOut);
attentionCellOut->setName(OUTPUT_ATTENTIONCELL_NAME);
network->markOutput(*attentionCellOut);
// ATTENTION ////////////////////////////////////////////////////////////////
ITensor* const inputMemory
= network->addInput(INPUT_MEMORY_NAME, DataType::kFLOAT, Dims3{1, mInputLength, mNumEncodingDim});
ITensor* const inputProcessedMemory
= network->addInput(INPUT_PROCESSED_NAME, DataType::kFLOAT, Dims5{1, mInputLength, mNumAttentionDim, 1, 1});
ITensor* const inputWeights = network->addInput(INPUT_WEIGHTS_NAME, DataType::kFLOAT, Dims4{1, 2, mInputLength, 1});
ITensor* const inputMask = network->addInput(INPUT_MASK_NAME, DataType::kFLOAT, Dims3{1, 1, mInputLength});
ITensor* const inputMaskLength = network->addInput(INPUT_LENGTH_NAME, DataType::kINT32, Dims2{1, 1});
// reshape data to go from {1,1,X} to {1,1,X,1,1}
IShuffleLayer* const queryShuffleLayer = network->addShuffle(*attentionHiddenOut);
queryShuffleLayer->setReshapeDimensions(Dims5{1, 1, attentionHiddenOut->getDimensions().d[2], 1, 1});
queryShuffleLayer->setName("decoder.attention_layer.query_layer.unsqueeze");
ITensor* const queryInput = queryShuffleLayer->getOutput(0);
const LayerData* const queryData
= importer.getWeights({"decoder", "attention_layer", "query_layer", "linear_layer"});
ILayer* const queryLayer = network->addFullyConnected(
*queryInput, mNumAttentionDim, queryData->get("weight"), Weights{DataType::kFLOAT, nullptr, 0});
queryLayer->setName("decoder.attention_layer.query_layer.linear_layer");
// build location layers
const LayerData* const locationConvData
= importer.getWeights({"decoder", "attention_layer", "location_layer", "location_conv", "conv"});
const LayerData* const locationLinearData
= importer.getWeights({"decoder", "attention_layer", "location_layer", "location_dense", "linear_layer"});
ILayer* const locationLayer
= AttentionLayerCreator::addLocation(*network, inputWeights, mNumAttentionDim, mNumAttentionFilters,
mAttentionKernelSize, *locationConvData, *locationLinearData, "decoder.attention_layer.location_layer");
const LayerData* const energyData = importer.getWeights({"decoder", "attention_layer", "v", "linear_layer"});
IShuffleLayer* const locationShuffleLayer = network->addShuffle(*locationLayer->getOutput(0));
locationShuffleLayer->setReshapeDimensions(
Dims5{locationLayer->getOutput(0)->getDimensions().d[0], locationLayer->getOutput(0)->getDimensions().d[1],
locationLayer->getOutput(0)->getDimensions().d[2], locationLayer->getOutput(0)->getDimensions().d[3], 1});
ILayer* const energyLayer = AttentionLayerCreator::addEnergy(*network, queryLayer->getOutput(0),
locationShuffleLayer->getOutput(0), inputProcessedMemory, *energyData, "decoder.attention_layer.v");
IShuffleLayer* const squeezeEnergyLayer = network->addShuffle(*energyLayer->getOutput(0));
squeezeEnergyLayer->setReshapeDimensions(
Dims4(energyLayer->getOutput(0)->getDimensions().d[0], energyLayer->getOutput(0)->getDimensions().d[1],
energyLayer->getOutput(0)->getDimensions().d[2], energyLayer->getOutput(0)->getDimensions().d[3]));
ILayer* const softMaxLayer = AttentionLayerCreator::addPaddedSoftMax(*network, squeezeEnergyLayer->getOutput(0),
inputMask, inputMaskLength, "decoder.attention_layer.softmax_layer");
IShuffleLayer* const transLayer = network->addShuffle(*softMaxLayer->getOutput(0));
transLayer->setFirstTranspose({2, 1, 0});
transLayer->setName("decoder.attention_layer.softmax_transpose");
ITensor* const attentionWeight = transLayer->getOutput(0);
ILayer* const sliceWeightsLayer
= network->addSlice(*inputWeights, Dims4{0, 1, 0, 0}, Dims4{1, 1, mInputLength, 1}, Dims4{1, 1, 1, 1});
IShuffleLayer* const squeezeWeightsLayer = network->addShuffle(*sliceWeightsLayer->getOutput(0));
squeezeWeightsLayer->setReshapeDimensions(Dims3(sliceWeightsLayer->getOutput(0)->getDimensions().d[0],
sliceWeightsLayer->getOutput(0)->getDimensions().d[1], sliceWeightsLayer->getOutput(0)->getDimensions().d[2]));
ILayer* const sumLayer
= network->addElementWise(*attentionWeight, *squeezeWeightsLayer->getOutput(0), ElementWiseOperation::kSUM);
sumLayer->setName("decoder.attention_layer.weight_sum_layer");
std::vector<ITensor*> weightOutputs{attentionWeight, sumLayer->getOutput(0)};
IConcatenationLayer* const outputWeightConcat
= network->addConcatenation(weightOutputs.data(), static_cast<int>(weightOutputs.size()));
outputWeightConcat->setAxis(2);
outputWeightConcat->setName("decoder.attention_weights.concat");
ITensor* const attentionWeightOutput = outputWeightConcat->getOutput(0);
#if NV_TENSORRT_MAJOR < 6
ILayer* const mmLayer = network->addMatrixMultiply(*attentionWeight, false, *inputMemory, false);
#else
ILayer* const mmLayer
= network->addMatrixMultiply(*attentionWeight, MatrixOperation::kNONE, *inputMemory, MatrixOperation::kNONE);
#endif
mmLayer->setName("decoder.attention_layer.mm");
ITensor* const attentionContextOutput = mmLayer->getOutput(0);
attentionWeightOutput->setName(OUTPUT_WEIGHTS_NAME);
network->markOutput(*attentionWeightOutput);
attentionContextOutput->setName(OUTPUT_CONTEXT_NAME);
network->markOutput(*attentionContextOutput);
// DECODER LSTM /////////////////////////////////////////////////////////////
ITensor* const inputDecoderHidden
= network->addInput(INPUT_DECODERHIDDEN_NAME, DataType::kFLOAT, Dims3{1, 1, mNumLSTMDim});
ITensor* const inputDecoderCell
= network->addInput(INPUT_DECODERCELL_NAME, DataType::kFLOAT, Dims3{1, 1, mNumLSTMDim});
const LayerData* const decoderLSTMData = importer.getWeights({"decoder", "decoder_rnn"});
std::array<ITensor*, 2> decoderLSTMConcatInputs{attentionHiddenOut, attentionContextOutput};
IConcatenationLayer* concatLayer = network->addConcatenation(decoderLSTMConcatInputs.data(), 2);
concatLayer->setAxis(2);
concatLayer->setName("decoder.decoder_rnn.concat");
ILayer* const decoderLSTMLayer = LSTM::addUnidirectionalCell(
network.get(), concatLayer->getOutput(0), inputDecoderHidden, inputDecoderCell, mNumLSTMDim, *decoderLSTMData);
decoderLSTMLayer->setName("decoder.decoder_rnn");
ITensor* const decoderHiddenOut = decoderLSTMLayer->getOutput(1);
ITensor* const decoderCellOut = decoderLSTMLayer->getOutput(2);
decoderHiddenOut->setName(OUTPUT_DECODERHIDDEN_NAME);
network->markOutput(*decoderHiddenOut);
decoderCellOut->setName(OUTPUT_DECODERCELL_NAME);
network->markOutput(*decoderCellOut);
// PROJECTION ///////////////////////////////////////////////////////////////
const LayerData* const channelData = importer.getWeights({"decoder", "linear_projection", "linear_layer"});
const LayerData* const gateData = importer.getWeights({"decoder", "gate_layer", "linear_layer"});
IShuffleLayer* const projHiddenShuffleLayer = network->addShuffle(*decoderHiddenOut);
projHiddenShuffleLayer->setReshapeDimensions(Dims4{1, -1, 1, 1});
projHiddenShuffleLayer->setName("decoder.decoder_rnn.hidden.unsqueeze");
IShuffleLayer* const projContextShuffleLayer = network->addShuffle(*attentionContextOutput);
projContextShuffleLayer->setReshapeDimensions(Dims4{1, -1, 1, 1});
projContextShuffleLayer->setName("decoder.attention_context.unsqueeze");
std::array<ITensor*, 2> projectionInputs{
projHiddenShuffleLayer->getOutput(0), projContextShuffleLayer->getOutput(0)};
IConcatenationLayer* const projConcatLayer
= network->addConcatenation(projectionInputs.data(), projectionInputs.size());
projConcatLayer->setAxis(1);
projConcatLayer->setName("decoder.projection.concat");
// we'll merge these two tensors layer wise for the weights
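    // Concatenating the linear_projection and gate_layer weights/biases lets a single
    // fully connected layer produce both outputs in one GEMM: the first mNumChannels
    // values form the mel frame and the final value is the stop gate.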
std::vector<float> projectionWeightData(channelData->get("weight").count + gateData->get("weight").count);
std::copy(static_cast<const float*>(channelData->get("weight").values),
static_cast<const float*>(channelData->get("weight").values) + channelData->get("weight").count,
projectionWeightData.data());
std::copy(static_cast<const float*>(gateData->get("weight").values),
static_cast<const float*>(gateData->get("weight").values) + gateData->get("weight").count,
projectionWeightData.data() + channelData->get("weight").count);
std::vector<float> projectionBiasData(channelData->get("bias").count + gateData->get("bias").count);
std::copy(static_cast<const float*>(channelData->get("bias").values),
static_cast<const float*>(channelData->get("bias").values) + channelData->get("bias").count,
projectionBiasData.data());
std::copy(static_cast<const float*>(gateData->get("bias").values),
static_cast<const float*>(gateData->get("bias").values) + gateData->get("bias").count,
projectionBiasData.data() + channelData->get("bias").count);
ILayer* const projLayer = network->addFullyConnected(*projConcatLayer->getOutput(0), mNumChannels + 1,
Weights{DataType::kFLOAT, projectionWeightData.data(), static_cast<int64_t>(projectionWeightData.size())},
Weights{DataType::kFLOAT, projectionBiasData.data(), static_cast<int64_t>(projectionBiasData.size())});
projLayer->setName("decoder.linear_projection.linear_layer");
ITensor* const outputChannels = projLayer->getOutput(0);
outputChannels->setName(OUTPUT_CHANNELS_NAME);
network->markOutput(*outputChannels);
// build engine
TRTPtr<IBuilderConfig> config(builder.createBuilderConfig());
config->setMaxWorkspaceSize(1ULL << 29); // 512 MB
if (useFP16)
{
config->setFlag(BuilderFlag::kFP16);
}
builder.setMaxBatchSize(maxBatchSize);
TRTPtr<ICudaEngine> engine(
builder.buildEngineWithConfig(*network, *config));
if (!engine)
{
throw std::runtime_error("Failed to build Tacotron2::DecoderPlain engine.");
}
return engine;
}
} // namespace tts
|
TensorFlow/LanguageModeling/BERT/data | data | GooglePretrainedWeightDownloader | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
import urllib.request
import zipfile
class GooglePretrainedWeightDownloader:
def __init__(self, save_path):
self.save_path = save_path + '/google_pretrained_weights'
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
# Download urls
self.model_urls = {
'bert_base_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip', 'uncased_L-12_H-768_A-12.zip'),
'bert_large_uncased': ('https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip', 'uncased_L-24_H-1024_A-16.zip'),
'bert_base_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip', 'cased_L-12_H-768_A-12.zip'),
'bert_large_cased': ('https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip', 'cased_L-24_H-1024_A-16.zip'),
'bert_base_multilingual_cased': ('https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip', 'multi_cased_L-12_H-768_A-12.zip'),
'bert_large_multilingual_uncased': ('https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip', 'multilingual_L-12_H-768_A-12.zip'),
'bert_base_chinese': ('https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip', 'chinese_L-12_H-768_A-12.zip')
}
# SHA256sum verification for file download integrity (and checking for changes from the download source over time)
self.bert_base_uncased_sha = {
'bert_config.json': '7b4e5f53efbd058c67cda0aacfafb340113ea1b5797d9ce6ee411704ba21fcbc',
'bert_model.ckpt.data-00000-of-00001': '58580dc5e0bf0ae0d2efd51d0e8272b2f808857f0a43a88aaf7549da6d7a8a84',
'bert_model.ckpt.index': '04c1323086e2f1c5b7c0759d8d3e484afbb0ab45f51793daab9f647113a0117b',
'bert_model.ckpt.meta': 'dd5682170a10c3ea0280c2e9b9a45fee894eb62da649bbdea37b38b0ded5f60e',
'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3',
}
self.bert_large_uncased_sha = {
'bert_config.json': 'bfa42236d269e2aeb3a6d30412a33d15dbe8ea597e2b01dc9518c63cc6efafcb',
'bert_model.ckpt.data-00000-of-00001': 'bc6b3363e3be458c99ecf64b7f472d2b7c67534fd8f564c0556a678f90f4eea1',
'bert_model.ckpt.index': '68b52f2205ffc64dc627d1120cf399c1ef1cbc35ea5021d1afc889ffe2ce2093',
'bert_model.ckpt.meta': '6fcce8ff7628f229a885a593625e3d5ff9687542d5ef128d9beb1b0c05edc4a1',
'vocab.txt': '07eced375cec144d27c900241f3e339478dec958f92fddbc551f295c992038a3',
}
self.bert_base_cased_sha = {
'bert_config.json': 'f11dfb757bea16339a33e1bf327b0aade6e57fd9c29dc6b84f7ddb20682f48bc',
'bert_model.ckpt.data-00000-of-00001': '734d5a1b68bf98d4e9cb6b6692725d00842a1937af73902e51776905d8f760ea',
'bert_model.ckpt.index': '517d6ef5c41fc2ca1f595276d6fccf5521810d57f5a74e32616151557790f7b1',
'bert_model.ckpt.meta': '5f8a9771ff25dadd61582abb4e3a748215a10a6b55947cbb66d0f0ba1694be98',
'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02',
}
self.bert_large_cased_sha = {
'bert_config.json': '7adb2125c8225da495656c982fd1c5f64ba8f20ad020838571a3f8a954c2df57',
'bert_model.ckpt.data-00000-of-00001': '6ff33640f40d472f7a16af0c17b1179ca9dcc0373155fb05335b6a4dd1657ef0',
'bert_model.ckpt.index': 'ef42a53f577fbe07381f4161b13c7cab4f4fc3b167cec6a9ae382c53d18049cf',
'bert_model.ckpt.meta': 'd2ddff3ed33b80091eac95171e94149736ea74eb645e575d942ec4a5e01a40a1',
'vocab.txt': 'eeaa9875b23b04b4c54ef759d03db9d1ba1554838f8fb26c5d96fa551df93d02',
}
self.bert_base_multilingual_cased_sha = {
'bert_config.json': 'e76c3964bc14a8bb37a5530cdc802699d2f4a6fddfab0611e153aa2528f234f0',
'bert_model.ckpt.data-00000-of-00001': '55b8a2df41f69c60c5180e50a7c31b7cdf6238909390c4ddf05fbc0d37aa1ac5',
'bert_model.ckpt.index': '7d8509c2a62b4e300feb55f8e5f1eef41638f4998dd4d887736f42d4f6a34b37',
'bert_model.ckpt.meta': '95e5f1997e8831f1c31e5cf530f1a2e99f121e9cd20887f2dce6fe9e3343e3fa',
'vocab.txt': 'fe0fda7c425b48c516fc8f160d594c8022a0808447475c1a7c6d6479763f310c',
}
self.bert_large_multilingual_uncased_sha = {
'bert_config.json': '49063bb061390211d2fdd108cada1ed86faa5f90b80c8f6fdddf406afa4c4624',
'bert_model.ckpt.data-00000-of-00001': '3cd83912ebeb0efe2abf35c9f1d5a515d8e80295e61c49b75c8853f756658429',
'bert_model.ckpt.index': '87c372c1a3b1dc7effaaa9103c80a81b3cbab04c7933ced224eec3b8ad2cc8e7',
'bert_model.ckpt.meta': '27f504f34f02acaa6b0f60d65195ec3e3f9505ac14601c6a32b421d0c8413a29',
'vocab.txt': '87b44292b452f6c05afa49b2e488e7eedf79ea4f4c39db6f2f4b37764228ef3f',
}
self.bert_base_chinese_sha = {
'bert_config.json': '7aaad0335058e2640bcb2c2e9a932b1cd9da200c46ea7b8957d54431f201c015',
'bert_model.ckpt.data-00000-of-00001': '756699356b78ad0ef1ca9ba6528297bcb3dd1aef5feadd31f4775d7c7fc989ba',
'bert_model.ckpt.index': '46315546e05ce62327b3e2cd1bed22836adcb2ff29735ec87721396edb21b82e',
'bert_model.ckpt.meta': 'c0f8d51e1ab986604bc2b25d6ec0af7fd21ff94cf67081996ec3f3bf5d823047',
'vocab.txt': '45bbac6b341c319adc98a532532882e91a9cefc0329aa57bac9ae761c27b291c',
}
# Relate SHA to urls for loop below
self.model_sha = {
'bert_base_uncased': self.bert_base_uncased_sha,
'bert_large_uncased': self.bert_large_uncased_sha,
'bert_base_cased': self.bert_base_cased_sha,
'bert_large_cased': self.bert_large_cased_sha,
'bert_base_multilingual_cased': self.bert_base_multilingual_cased_sha,
'bert_large_multilingual_uncased': self.bert_large_multilingual_uncased_sha,
'bert_base_chinese': self.bert_base_chinese_sha
}
# Helper to get sha256sum of a file
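    # Streams the file in 128 KiB chunks through a reusable memoryview so that large
    # checkpoint archives are never read into memory at once.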
def sha256sum(self, filename):
h = hashlib.sha256()
b = bytearray(128*1024)
mv = memoryview(b)
with open(filename, 'rb', buffering=0) as f:
for n in iter(lambda : f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
def download(self):
# Iterate over urls: download, unzip, verify sha256sum
found_mismatch_sha = False
for model in self.model_urls:
url = self.model_urls[model][0]
file = self.save_path + '/' + self.model_urls[model][1]
print('Downloading', url)
response = urllib.request.urlopen(url)
with open(file, 'wb') as handle:
handle.write(response.read())
print('Unzipping', file)
zip = zipfile.ZipFile(file, 'r')
zip.extractall(self.save_path)
zip.close()
sha_dict = self.model_sha[model]
for extracted_file in sha_dict:
sha = sha_dict[extracted_file]
if sha != self.sha256sum(file[:-4] + '/' + extracted_file):
found_mismatch_sha = True
print('SHA256sum does not match on file:', extracted_file, 'from download url:', url)
else:
print(file[:-4] + '/' + extracted_file, '\t', 'verified')
if not found_mismatch_sha:
print("All downloads pass sha256sum verification.")
def serialize(self):
pass
def deserialize(self):
pass
def listAvailableWeights(self):
print("Available Weight Datasets")
for item in self.model_urls:
print(item)
def listLocallyStoredWeights(self):
pass
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | task | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import platform
import subprocess
from datetime import datetime
from typing import Dict, List, Optional, Union
import cpuinfo
import psutil
import yaml
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import CustomDumper, DataObject
from .experiment import Experiment
from .triton import Triton
class GPU(DataObject):
"""
GPU information data object
"""
name: str
driver_version: str
cuda_version: str
memory: str
tdp: str
def __init__(self, name: str, driver_version: str, cuda_version: str, memory: str, tdp: str):
"""
Args:
name: name of GPU
driver_version: version of driver
cuda_version: version of CUDA
memory: size of memory available on GPU [MB]
tdp: Max TDP of GPU unit
"""
self.name = name
self.driver_version = driver_version
self.cuda_version = cuda_version
self.memory = memory
self.tdp = tdp
@staticmethod
def from_dict(data: Dict):
"""
Create GPU object from dictionary
Args:
data: dictionary with GPU data
Returns:
GPU object
"""
return GPU(
name=data["name"],
driver_version=data["driver_version"],
cuda_version=data["cuda_version"],
memory=data["memory"],
tdp=data["tdp"],
)
@staticmethod
def from_host():
"""
Create GPU object from host data
Returns:
GPU object
"""
data = subprocess.check_output(
["nvidia-smi", "--query-gpu=name,driver_version,memory.total,power.max_limit", "--format=csv"]
).decode()
lines = data.split(sep="\n")
device_details = lines[1].split(",")
name = device_details[0].strip()
driver_version = device_details[1].strip()
memory = device_details[2].strip()
tdp = device_details[3].strip()
cuda_version = None
data = subprocess.check_output(["nvidia-smi", "--query"]).decode()
lines = data.split(sep="\n")
for line in lines:
if line.startswith("CUDA Version"):
cuda_version = line.split(":")[1].strip()
break
return GPU(
name=name,
driver_version=driver_version,
cuda_version=cuda_version,
memory=memory,
tdp=tdp,
)
class CPU(DataObject):
"""
CPU details
"""
name: str
physical_cores: int
logical_cores: int
min_frequency: float
max_frequency: float
def __init__(self, name: str, physical_cores: int, logical_cores: int, min_frequency: float, max_frequency: float):
"""
Args:
name: name of CPU unit
physical_cores: number of physical cores available on CPU
logical_cores: number of logical cores available on CPU
min_frequency: minimal clock frequency
max_frequency: maximal clock frequency
"""
self.name = name
self.physical_cores = physical_cores
self.logical_cores = logical_cores
self.min_frequency = min_frequency
self.max_frequency = max_frequency
@staticmethod
def from_host():
"""
Create CPU object from host data
Returns:
CPU object
"""
return CPU(
name=cpuinfo.get_cpu_info()["brand_raw"],
physical_cores=psutil.cpu_count(logical=False),
logical_cores=psutil.cpu_count(logical=True),
min_frequency=psutil.cpu_freq().min,
max_frequency=psutil.cpu_freq().max,
)
class Memory(DataObject):
"""
Memory data object
"""
size: float
def __init__(self, size: float):
"""
Args:
size: RAM memory size in MB
"""
self.size = size
@staticmethod
def from_host():
"""
Create Memory object from host data
Returns:
Memory object
"""
svm = psutil.virtual_memory()
return Memory(size=svm.total)
class SystemInfo(DataObject):
"""
System Information data object
"""
system: str
cpu: CPU
memory: Memory
gpu: GPU
def __init__(self, system: str, cpu: CPU, memory: Memory, gpu: GPU):
"""
Args:
system: name of operating system
cpu: CPU info
memory: Memory info
gpu: GPU info
"""
self.system = system
self.cpu = cpu
self.memory = memory
self.gpu = gpu
@staticmethod
def from_host():
"""
Create SystemInfo object from host data
Returns:
SystemInfo object
"""
system = platform.platform()
gpu = GPU.from_host()
memory = Memory.from_host()
cpu = CPU.from_host()
return SystemInfo(system=system, cpu=cpu, gpu=gpu, memory=memory)
class Checkpoint(DataObject):
"""
Checkpoint data object
"""
def __init__(self, name: str, url: str, path: Union[str, pathlib.Path]):
"""
Args:
            name: Name of checkpoint
            url: Url of the checkpoint
            path: Location of checkpoint on local hardware
"""
self.name = name
self.url = url
self.path = pathlib.Path(path)
class Dataset(DataObject):
"""
Dataset data object
"""
def __init__(self, name: str):
"""
Args:
name: Name of dataset
"""
self.name = name
class Task(DataObject):
"""
Task data object to store build information
"""
model_name: str
framework: str
started_at: int
ended_at: Optional[int]
container_version: str
checkpoints: Dict[str, Checkpoint]
datasets: Dict[str, Dataset]
datasets_dir: Optional[Union[str, pathlib.Path]]
experiments: List[Experiment]
system_info: SystemInfo
triton_container_image: Optional[str]
triton_custom_operations: Optional[str]
filename: str = "task.yaml"
results_dir: str = "results"
checkpoints_dir: str = "checkpoints"
def __init__(
self,
model_name: str,
framework: str,
container_version: str,
checkpoints: Dict,
datasets: Dict,
experiments: List,
system_info: SystemInfo,
started_at: int,
logs_dir: pathlib.Path = pathlib.Path("/var/logs"),
datasets_dir: Optional[Union[str, pathlib.Path]] = None,
ended_at: Optional[int] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: str = Triton.LOAD_MODE.EXPLICIT,
):
"""
Args:
model_name: Name of model
framework: Model framework
container_version: Container version used in task
checkpoints: List of checkpoints
datasets: List of datasets
            datasets_dir: Directory where datasets are stored
experiments: List of experiments run as part of task
system_info: information about node on which experiment was executed
started_at: Time when task has started
ended_at: Time when task has ended
triton_container_image: Custom Triton Container Image used for task
triton_custom_operations: Custom operations library path
            triton_load_model_method: Method used to load models on Triton
            logs_dir: Directory where task logs are stored
"""
self.started_at = started_at
self.ended_at = ended_at
self.model_name = model_name
self.framework = framework
self.container_version = container_version
self.checkpoints = checkpoints
self.datasets = datasets
self.datasets_dir = pathlib.Path(datasets_dir)
self.experiments = experiments
self.system_info = system_info
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
self.triton_load_model_method = triton_load_model_method
self.logs_dir = logs_dir
def start(self) -> None:
"""
Update stage execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update stage execution info at end
Returns:
None
"""
self.ended_at = int(datetime.utcnow().timestamp())
def to_file(self, file_path: Union[pathlib.Path, str]):
"""
Store task data to YAML file
Args:
file_path: path to file where task data has to be saved
Returns:
None
"""
task_data = self.to_dict()
with open(file_path, "w") as f:
yaml.dump(task_data, f, Dumper=CustomDumper, width=240, sort_keys=False)
|
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/evaluation | evaluation | evaluation_AMP_A100-80G | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export CUDA_VISIBLE_DEVICES=0
export TF_GPU_HOST_MEM_LIMIT_IN_MB=131072
python main.py \
--cfg config/efficientnet_v2/s_cfg.py \
--mode eval \
--use_amp \
--use_xla \
--eval_batch_size 128 \
--eval_img_size 384 \
--model_dir ./output/expXX \
--n_repeat_eval 4 \
--moving_average_decay 0.9999 # enables evaluation using EMA weights too
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | random | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef TT2I_RANDOM_H
#define TT2I_RANDOM_H
#include "cuda_runtime.h"
#include "curand_kernel.h"
namespace tts
{
class Random
{
public:
/**
* @brief Create a new Random object.
*
* @param numStates The number of internal states to use.
* @param seed The seed to set.
*/
Random(int numStates, unsigned int seed = 0);
// disable copying
Random(const Random& rand) = delete;
Random& operator=(const Random& rand) = delete;
/**
* @brief Destructor (cleanup random states and memory).
*/
~Random();
/**
* @brief Set the seed of the number generator.
*
     * @param seed The seed to use.
     * @param stream The CUDA stream to use when setting the seed (defaults to the default stream).
*/
void setSeed(unsigned int seed, cudaStream_t stream = 0);
/**
* @brief Get the random states on the device.
*
* @return The random states.
*/
curandState_t* getRandomStates();
/**
* @brief Get the number of random states.
*
* @return The number.
*/
int size() const
{
return mNumStates;
}
private:
int mNumStates;
curandState_t* mRandStateDevice;
};
} // namespace tts
#endif
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | prefetcher | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides functions to prefetch tensors to feed into models."""
import tensorflow as tf
def prefetch(tensor_dict, capacity):
"""Creates a prefetch queue for tensors.
Creates a FIFO queue to asynchronously enqueue tensor_dicts and returns a
dequeue op that evaluates to a tensor_dict. This function is useful in
prefetching preprocessed tensors so that the data is readily available for
consumers.
Example input pipeline when you don't need batching:
----------------------------------------------------
key, string_tensor = slim.parallel_reader.parallel_read(...)
tensor_dict = decoder.decode(string_tensor)
tensor_dict = preprocessor.preprocess(tensor_dict, ...)
prefetch_queue = prefetcher.prefetch(tensor_dict, capacity=20)
tensor_dict = prefetch_queue.dequeue()
outputs = Model(tensor_dict)
...
----------------------------------------------------
For input pipelines with batching, refer to core/batcher.py
Args:
tensor_dict: a dictionary of tensors to prefetch.
capacity: the size of the prefetch queue.
Returns:
a FIFO prefetcher queue
"""
names = list(tensor_dict.keys())
dtypes = [t.dtype for t in tensor_dict.values()]
shapes = [t.get_shape() for t in tensor_dict.values()]
prefetch_queue = tf.PaddingFIFOQueue(capacity, dtypes=dtypes,
shapes=shapes,
names=names,
name='prefetch_queue')
enqueue_op = prefetch_queue.enqueue(tensor_dict)
tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
prefetch_queue, [enqueue_op]))
tf.summary.scalar('queue/%s/fraction_of_%d_full' % (prefetch_queue.name,
capacity),
tf.to_float(prefetch_queue.size()) * (1. / capacity))
return prefetch_queue
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner | runner | config_NVIDIA-A30 | batching: dynamic
checkpoints:
- name: widedeep_tf2_amp_base_128k_nvtabular
url: ''
configurations:
- checkpoint: widedeep_tf2_amp_base_128k_nvtabular
parameters:
backend_accelerator: amp
checkpoint: widedeep_tf2_amp_base_128k_nvtabular
device_kind: gpu
export_format: tf-savedmodel
export_precision: fp32
format: tf-savedmodel
max_batch_size: 131072
number_of_model_instances: 2
precision: fp32
tensorrt_capture_cuda_graph: 0
torch_jit: none
- checkpoint: widedeep_tf2_amp_base_128k_nvtabular
parameters:
backend_accelerator: none
checkpoint: widedeep_tf2_amp_base_128k_nvtabular
device_kind: gpu
export_format: tf-savedmodel
export_precision: fp16
format: trt
max_batch_size: 131072
number_of_model_instances: 2
precision: fp16
tensorrt_capture_cuda_graph: 1
torch_jit: none
container_version: '22.02'
datasets:
- name: outbrain
datasets_dir: datasets
ensemble_model_name: null
framework: TensorFlow2
measurement_steps_offline: 8
measurement_steps_online: 32
model_name: WidenDeep
performance_tool: perf_analyzer
triton_container_image: nvcr.io/nvidia/tritonserver:22.02-py3
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
PyTorch/Segmentation/nnUNet/notebooks | notebooks | BraTS22 | #!/usr/bin/env python
# coding: utf-8
# # nnU-Net for BraTS22
#
# # Table of contents
# - [Introduction](#introduction)
# - [Dataset](#dataset)
# - [Data pre-processing](#preprocessing)
# - [Data augmentations](#augmentations)
# - [Loss function](#loss)
# - [Model](#model)
# - [Training](#training)
# - [Inference](#inference)
# - [Post-processing](#postprocessing)
#
# # Introduction <a name="introduction"></a>
#
# The goal of the [BraTS 2022 challenge](https://www.synapse.org/#!Synapse:syn27046444/wiki/616571) was to create a model for segmenting the brain glioblastoma subregions in mpMRI scans. In the 2022 edition we improved [our last year's solution](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/nnUNet/notebooks/BraTS21.ipynb), which [won the validation phase](https://developer.nvidia.com/blog/nvidia-data-scientists-take-top-spots-in-miccai-2021-brain-tumor-segmentation-challenge) and was [ranked 3rd in the test phase](https://www.rsna.org/education/ai-resources-and-training/ai-image-challenge/brain-tumor-ai-challenge-2021).
#
# In this notebook, we will share with you the recipe we used for training our U-Net model for the BraTS22 challenge, so that you can reproduce our results. In particular, we will walk you through the following steps: data pre-processing, designing the loss function, building and training the model, running inference, and finally post-processing the predictions.
#
# # Dataset <a name="dataset"></a>
#
# The training dataset provided for BraTS22 was the same as for the 2021 edition and consists of 1,251 brain mpMRI scans along with segmentation annotations of tumorous regions. The 3D volumes were skull-stripped and resampled to 1 mm isotropic resolution, with dimensions of (240, 240, 155) voxels. For each example, four modalities were given: Fluid Attenuated Inversion Recovery (FLAIR), native (T1), post-contrast T1-weighted (T1Gd), and T2-weighted (T2). See the image below for an example of each modality. Annotations consist of four classes: 1 for necrotic tumor core (NCR), 2 for peritumoral edematous tissue (ED), 4 for enhancing tumor (ET), and 0 for background (voxels that are not part of the tumor).
#
# To download the training and validation datasets, you need to have an account on the https://www.synapse.org platform and be registered for the BraTS21 challenge. We will assume that after downloading and unzipping, the dataset is organized as follows:
#
# ```
# /data
# │
# ├───BraTS2021_train
# │ ├──BraTS2021_00000
# │ │ └──BraTS2021_00000_flair.nii.gz
# │ │ └──BraTS2021_00000_t1.nii.gz
# │ │ └──BraTS2021_00000_t1ce.nii.gz
# │ │ └──BraTS2021_00000_t2.nii.gz
# │ │ └──BraTS2021_00000_seg.nii.gz
# │ ├──BraTS2021_00002
# │ │ └──BraTS2021_00002_flair.nii.gz
# │ ... └──...
# │
# └────BraTS2021_val
# ├──BraTS2021_00001
# │ └──BraTS2021_00001_flair.nii.gz
# │ └──BraTS2021_00001_t1.nii.gz
# │ └──BraTS2021_00001_t1ce.nii.gz
# │ └──BraTS2021_00001_t2.nii.gz
# ├──BraTS2021_00002
# │ └──BraTS2021_00002_flair.nii.gz
# ... └──...
# ```
#
# Let's visualize a BraTS2021_00000 example from the training dataset. Each plot presents a different modality (from left to right: FLAIR, T1, T1ce, T2), and an annotation.
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
from glob import glob
imgs = [nib.load(f"/data/BraTS2021_train/BraTS2021_00000/BraTS2021_00000_{m}.nii.gz").get_fdata().astype(np.float32)[:, :, 75] for m in ["flair", "t1", "t1ce", "t2"]]
lbl = nib.load("/data/BraTS2021_train/BraTS2021_00000/BraTS2021_00000_seg.nii.gz").get_fdata().astype(np.uint8)[:, :, 75]
fig, ax = plt.subplots(nrows=1, ncols=5, figsize=(15, 15))
for i, img in enumerate(imgs):
ax[i].imshow(img, cmap='gray')
ax[i].axis('off')
ax[-1].imshow(lbl, vmin=0, vmax=4)
ax[-1].axis('off')
plt.tight_layout()
plt.show()
# # Data pre-processing <a name="preprocessing"></a>
#
# Each example of the BraTS22 dataset consists of four [NIfTI](https://nifti.nimh.nih.gov/) files with different MRI modalities (filenames with suffixes flair, t1, t1ce, t2). Additionally, examples in the training dataset have a NIfTI with annotation (filename with suffix seg). As a first step of data pre-processing, all four modalities are stacked such that each example has shape (4, 240, 240, 155) (the input tensor is in the (C, H, W, D) layout, where C-channels, H-height, W-width and D-depth). Then redundant background voxels (with voxel value zero) on the borders of each volume are [cropped](https://docs.monai.io/en/latest/transforms.html#cropforeground), as they do not provide any useful information and can be ignored by the neural network. Subsequently, for each example, the mean and the standard deviation are computed within the non-zero region for each channel separately. All volumes are [normalized](https://docs.monai.io/en/latest/transforms.html#normalizeintensityd) by first subtracting the mean and then dividing by the standard deviation. The background voxels are not normalized, so that their value remains at zero. To distinguish between background voxels and normalized voxels which have values close to zero, we add an input channel with a one-hot encoding of the foreground voxels and stack it with the input data. As a result, each example has 5 channels.
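# The cropping, normalization, and foreground one-hot channel described above are performed by the repository's `preprocess.py` script. Purely as an illustration (plain NumPy, with function and variable names of our own choosing rather than the script's), the per-example normalization step could look roughly like this:
# In[ ]:
# Illustrative sketch (not the actual preprocess.py implementation): normalize each
# channel over its non-zero voxels and append a one-hot foreground channel.
import numpy as np
def normalize_with_foreground_channel(volume):
    # volume: float32 array of shape (C, H, W, D) with background voxels equal to 0
    foreground = (np.abs(volume).sum(axis=0, keepdims=True) > 0).astype(volume.dtype)
    normalized = np.zeros_like(volume)
    for c in range(volume.shape[0]):
        voxels = volume[c][foreground[0] > 0]
        if voxels.size > 0:
            mean, std = voxels.mean(), voxels.std() + 1e-8
            normalized[c] = np.where(foreground[0] > 0, (volume[c] - mean) / std, 0)
    return np.concatenate([normalized, foreground], axis=0)  # shape (C + 1, H, W, D)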
#
# Let's start by preparing the raw training and validation datasets into stacked NIfTI files.
# In[2]:
import json
import os
from glob import glob
from subprocess import call
import time
import nibabel
import numpy as np
from joblib import Parallel, delayed
def load_nifty(directory, example_id, suffix):
return nibabel.load(os.path.join(directory, example_id + "_" + suffix + ".nii.gz"))
def load_channels(d, example_id):
return [load_nifty(d, example_id, suffix) for suffix in ["flair", "t1", "t1ce", "t2"]]
def get_data(nifty, dtype="int16"):
if dtype == "int16":
data = np.abs(nifty.get_fdata().astype(np.int16))
data[data == -32768] = 0
return data
return nifty.get_fdata().astype(np.uint8)
def prepare_nifty(d):
example_id = d.split("/")[-1]
flair, t1, t1ce, t2 = load_channels(d, example_id)
affine, header = flair.affine, flair.header
vol = np.stack([get_data(flair), get_data(t1), get_data(t1ce), get_data(t2)], axis=-1)
vol = nibabel.nifti1.Nifti1Image(vol, affine, header=header)
nibabel.save(vol, os.path.join(d, example_id + ".nii.gz"))
if os.path.exists(os.path.join(d, example_id + "_seg.nii.gz")):
seg = load_nifty(d, example_id, "seg")
affine, header = seg.affine, seg.header
        vol = get_data(seg, "uint8")
vol[vol == 4] = 3
seg = nibabel.nifti1.Nifti1Image(vol, affine, header=header)
nibabel.save(seg, os.path.join(d, example_id + "_seg.nii.gz"))
def prepare_dirs(data, train):
img_path, lbl_path = os.path.join(data, "images"), os.path.join(data, "labels")
call(f"mkdir {img_path}", shell=True)
if train:
call(f"mkdir {lbl_path}", shell=True)
dirs = glob(os.path.join(data, "BraTS*"))
for d in dirs:
if "_" in d.split("/")[-1]:
files = glob(os.path.join(d, "*.nii.gz"))
for f in files:
if "flair" in f or "t1" in f or "t1ce" in f or "t2" in f:
continue
if "_seg" in f:
call(f"mv {f} {lbl_path}", shell=True)
else:
call(f"mv {f} {img_path}", shell=True)
call(f"rm -rf {d}", shell=True)
def prepare_dataset_json(data, train):
images, labels = glob(os.path.join(data, "images", "*")), glob(os.path.join(data, "labels", "*"))
images = sorted([img.replace(data + "/", "") for img in images])
labels = sorted([lbl.replace(data + "/", "") for lbl in labels])
modality = {"0": "FLAIR", "1": "T1", "2": "T1CE", "3": "T2"}
labels_dict = {"0": "background", "1": "edema", "2": "non-enhancing tumor", "3": "enhancing tumour"}
if train:
key = "training"
data_pairs = [{"image": img, "label": lbl} for (img, lbl) in zip(images, labels)]
else:
key = "test"
data_pairs = [{"image": img} for img in images]
dataset = {
"labels": labels_dict,
"modality": modality,
key: data_pairs,
}
with open(os.path.join(data, "dataset.json"), "w") as outfile:
json.dump(dataset, outfile)
def run_parallel(func, args):
return Parallel(n_jobs=os.cpu_count())(delayed(func)(arg) for arg in args)
def prepare_dataset(data, train):
print(f"Preparing BraTS21 dataset from: {data}")
start = time.time()
run_parallel(prepare_nifty, sorted(glob(os.path.join(data, "BraTS*"))))
prepare_dirs(data, train)
prepare_dataset_json(data, train)
end = time.time()
print(f"Preparing time: {(end - start):.2f}")
prepare_dataset("/data/BraTS2021_train", True)
prepare_dataset("/data/BraTS2021_val", False)
print("Finished!")
# Now, lets preprocesses the datasets by cropping and normalizing the volumes. We will store the pre-processed volumes as NumPy arrays.
# In[3]:
get_ipython().system('python3 ../preprocess.py --task 11 --ohe --exec_mode training')
get_ipython().system('python3 ../preprocess.py --task 12 --ohe --exec_mode test')
print("Finished!")
# # Data Augmentations <a name="augmentations"></a>
#
# Data augmentation is a technique that alleviates the overfitting problem by artificially extending the dataset during the training phase. To make our method more robust, the following data augmentations are used during the training phase:
#
# 1. **Biased crop**: From the input volume, a patch of dimensions (5, 128, 128, 128) was randomly cropped. Additionally, with probability of 0.4, the patch selected via random biased crop is guaranteed to contain some foreground voxels (with a positive class in the ground truth).
# 2. **Zoom**: With probability of 0.15, a zoom factor is sampled uniformly from (1.0, 1.4) and the input volume is zoomed by the sampled factor with cubic interpolation, while the label uses nearest neighbor interpolation.
# 3. **Flips**: With probability of 0.5, for each x, y, z axis independently, the volume is flipped along that axis.
# 4. **Gaussian Noise**: With probability of 0.15, random Gaussian noise with mean zero and standard deviation sampled uniformly from (0, 0.33) is sampled for each voxel and added to the input volume.
# 5. **Gaussian Blur**: With probability of 0.15, Gaussian blurring with the standard deviation of the Gaussian kernel sampled uniformly from (0.5, 1.5) is applied to the input volume.
# 6. **Brightness**: With probability of 0.15, a random value is sampled uniformly from (0.7, 1.3) and the input volume voxels are multiplied by it.
# 7. **Contrast**: With probability of 0.15, a random value is sampled uniformly from (0.65, 1.5) and the input volume voxels are multiplied by it and clipped to their original min and max values.
#
# The data loading pipeline is implemented with [NVIDIA Data Loading Library (DALI)](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/index.html), which addresses the problem of the CPU bottleneck by offloading data augmentations to the GPU. We encourage you to check out the implementation details of our [DALI pipeline](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/nnUNet/data_loading/dali_loader.py).
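# To make this concrete, below is a rough NumPy-only sketch of two of the augmentations listed above (random flips and Gaussian noise). It is for illustration only; the actual pipeline is implemented with DALI and runs on the GPU.
# In[ ]:
# Illustrative sketch of the probabilistic augmentation pattern (not the DALI implementation):
import numpy as np
def augment(img, lbl, rng=np.random):
    # img: (C, H, W, D) float32 patch, lbl: (1, H, W, D) integer labels
    for axis in (1, 2, 3):  # flip along each spatial axis independently with probability 0.5
        if rng.rand() < 0.5:
            img, lbl = np.flip(img, axis=axis), np.flip(lbl, axis=axis)
    if rng.rand() < 0.15:  # Gaussian noise with std sampled uniformly from (0, 0.33)
        std = rng.uniform(0.0, 0.33)
        img = img + rng.normal(0.0, std, size=img.shape).astype(img.dtype)
    return img, lbl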
# # Loss function <a name="loss"></a>
#
# The BraTS leaderboard is computed based on three partially overlapping regions: whole tumor (1, 2, 4), tumor core (1, 4) and enhancing tumor (4), instead of the classes present in the labels. Thus, it is beneficial to construct the loss function based on the classes used for the ranking calculation. Therefore, we optimize each region separately with a sum of the binary cross-entropy and the Dice loss.
# In[4]:
import torch.nn as nn
from monai.losses import DiceLoss
class Loss(nn.Module):
def __init__(self):
super(Loss, self).__init__()
self.dice = DiceLoss(sigmoid=True, batch=True)
self.ce = nn.BCEWithLogitsLoss()
def _loss(self, p, y):
return self.dice(p, y) + self.ce(p, y.float())
def forward(self, p, y):
y_wt, y_tc, y_et = y > 0, ((y == 1) + (y == 3)) > 0, y == 3
p_wt, p_tc, p_et = p[:, 0].unsqueeze(1), p[:, 1].unsqueeze(1), p[:, 2].unsqueeze(1)
l_wt, l_tc, l_et = self._loss(p_wt, y_wt), self._loss(p_tc, y_tc), self._loss(p_et, y_et)
return l_wt + l_tc + l_et
# # Model <a name="model"></a>
#
# We have made some modifications to the U-Net architecture for the BraTS challenge with respect to the original nnU-Net template. In particular, the U-Net template in the nnU-Net has an encoder depth of 6, and the convolution channels at each encoder level are: 32, 64, 128, 256, 320, 320. Based on the experiments we ran, increasing the depth of the encoder to 7, modifying the number of channels to: 64, 96, 128, 192, 256, 384, 512, and using deep supervision improves the final score.
#
# For deep supervision, we used two additional output heads at the decoder levels with feature map sizes (64, 64, 64) and (32, 32, 32). To match the shapes of the additional predictions with the label shape of (128, 128, 128), we downsampled the label to the (64, 64, 64) and (32, 32, 32) shapes using nearest neighbor interpolation, so that the loss can be computed for the additional outputs.
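# As a rough sketch of the idea (the nnU-Net training code in this repository handles this internally), the deep-supervision targets can be obtained by downsampling the label with nearest neighbor interpolation and summing the per-head losses. The `loss_fn` and the list of head outputs below are assumed inputs, not code taken from the repository.
# In[ ]:
# Illustrative sketch of deep supervision (loss_fn and outputs are assumed to exist):
import torch
import torch.nn.functional as F
def deep_supervision_loss(loss_fn, outputs, label):
    # outputs: list of logits, e.g. at (128, 128, 128), (64, 64, 64) and (32, 32, 32) resolutions
    # label: (N, 1, 128, 128, 128) tensor with integer class values
    total = 0.0
    for out in outputs:
        # nearest neighbor downsampling keeps the label values discrete
        target = F.interpolate(label.float(), size=out.shape[2:], mode="nearest")
        total = total + loss_fn(out, target)
    return total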
# In[5]:
from IPython.display import Image
Image(filename="../images/unet-brats22.jpg")
# Figure 1: *The U-Net architecture used for BraTS22 challenge.*
#
# # Training <a name="training"></a>
#
# Now, let's start training the model. For that, we will call the training script from our nnUNet repo with some additional command line arguments for BraTS challenge:
#
# - `--brats` - use loss function with partially overlapping regions (WT, TC, ET) and BraTS specific inference;
# - `--brats22_model` - use the UNet3D model designed for the BraTS22 edition;
#
# and the regular command line arguments:
#
# - `--scheduler` - use a cosine decay learning rate scheduler with 250 steps of warm up;
# - `--learning_rate 0.0003` - initial learning rate after warm up will be set to 0.0003;
# - `--epochs 30` - training will be done for 30 epochs;
# - `--fold 0` - training will be done for fold 0 (by default, 5-fold cross validation is used);
# - `--amp` - training with automatic mixed precision, for faster training and memory reduction;
# - `--gpus 1` - one GPU will be used during training;
# - `--task 11` - task number for BraTS21 training dataset. See file `data_preprocessing/configs.py` for more details;
# - `--nfolds 10` - increase the number of folds from the default 5 to 10;
# - `--save_ckpt` - save the checkpoint with the highest Dice score achieved during training.
#
# We will run training on 1xA100 GPU. To train the model with [AMP](https://developer.nvidia.com/automatic-mixed-precision), you will need a GPU with at least 15 GB of memory.
#
# Here, we will train the model on just one fold (fold with index 0) and for a reduced number of epochs. For the challenge submission, we trained 5 models (one per fold) for 150 epochs and averaged their predictions.
# In[6]:
get_ipython().system('python ../main.py --brats --brats22_model --scheduler --learning_rate 0.0003 --epochs 10 --fold 0 --amp --gpus 1 --task 11 --nfolds 10 --save_ckpt')
# # Inference <a name="inference"></a>
#
# During inference, the input volume can have an arbitrary size, instead of the fixed patch size (128, 128, 128) used during the training phase. Thus, we use the [sliding window inference](https://docs.monai.io/en/latest/inferers.html) from the [MONAI](https://monai.io/) library, where the window has the same size as the training patch, i.e., (128, 128, 128), and adjacent windows overlap by half the size of a patch. The predictions on the overlapping regions are then averaged with Gaussian importance weighting, such that the weights of the center voxels have higher importance.
#
# One of the known tricks to improve prediction robustness is to apply test time augmentation (TTA). During inference, we create eight versions of the input volume, such that each version corresponds to one of the eight possible combinations of flips along the x, y, z axes. We then run inference for each version of the input volume and transform the predictions back to the original input volume orientation by applying the same flips to the predictions as were used for the input volume. Finally, the probabilities from all predictions are averaged.
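# Below is a rough sketch of the flip-based TTA described above; `predictor` stands for the sliding window inference call on the trained model and is an assumed callable, not code taken from the repository.
# In[ ]:
# Illustrative sketch of test time augmentation by flips (predictor is an assumed callable):
import itertools
import torch
def tta_predict(predictor, volume):
    # volume: (N, C, H, W, D) tensor; average sigmoid probabilities over all 8 flip combinations
    flip_sets = [flips for r in range(4) for flips in itertools.combinations((2, 3, 4), r)]
    probs = 0.0
    for flips in flip_sets:  # (), (2,), (3,), ..., (2, 3, 4) -> 8 combinations
        flipped = torch.flip(volume, dims=flips) if flips else volume
        logits = predictor(flipped)
        logits = torch.flip(logits, dims=flips) if flips else logits
        probs = probs + torch.sigmoid(logits)
    return probs / len(flip_sets)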
#
# Let's run inference with TTA on the challenge validation dataset.
#
# Note: You will have to modify the `--ckpt_path` argument, such that the path to checkpoint is valid.
# In[7]:
get_ipython().system('python ../main.py --gpus 1 --amp --save_preds --exec_mode predict --brats --brats22_model --data /data/12_3d/test --ckpt_path /results/checkpoints/epoch=8-dice=89.94.ckpt --tta')
# # Post-processing <a name="postprocessing"></a>
#
# Because we optimize the three overlapping regions (WT, TC, ET), we need to convert the predictions back to the original classes (NCR, ED, ET). The strategy for transforming the regions back to the original classes is the following: if the WT probability for a given voxel is less than 0.45, its class is set to 0 (background); otherwise, if the probability for TC is less than 0.4, the voxel class is 2 (ED); and finally, if the probability for ET is less than 0.4, the voxel has class 1 (NCR), or otherwise 4 (ET).
#
# Furthermore, we applied the following post-processing strategy: find the ET connected components; for components smaller than 16 voxels with a mean probability smaller than 0.9, change their class to NCR (so that the voxels are still considered part of the tumor core); next, if overall there are fewer than 73 ET voxels and their mean probability is smaller than 0.9, change all ET voxels to NCR. With such post-processing we avoided the edge case where the model predicted a few enhancing tumor (ET) voxels but there were none in the ground truth. Such post-processing was beneficial to the final score because, if there are no enhancing tumor voxels in the label, the Dice score for a prediction with zero false positives is 1, and 0 otherwise.
# In[8]:
import os
from glob import glob
from subprocess import call
import nibabel as nib
import numpy as np
from scipy.ndimage.measurements import label
def to_lbl(pred):
enh = pred[2]
c1, c2, c3 = pred[0] > 0.4, pred[1] > 0.35, pred[2] > 0.375
pred = (c1 > 0).astype(np.uint8)
pred[(c2 == False) * (c1 == True)] = 2
pred[(c3 == True) * (c1 == True)] = 4
components, n = label(pred == 4)
for et_idx in range(1, n + 1):
_, counts = np.unique(pred[components == et_idx], return_counts=True)
if 1 < counts[0] and counts[0] < 4 and np.mean(enh[components == et_idx]) < 0.9:
pred[components == et_idx] = 1
et = pred == 4
if 0 < et.sum() and et.sum() < 5 and np.mean(enh[et]) < 0.9:
pred[et] = 1
pred = np.transpose(pred, (2, 1, 0)).astype(np.uint8)
return pred
def prepare_predictions(e):
fname = e[0].split("/")[-1].split(".")[0]
preds = [np.load(f) for f in e]
p = to_lbl(np.mean(preds, 0))
img = nib.load(f"/data/BraTS2021_val/images/{fname}.nii.gz")
nib.save(
nib.Nifti1Image(p, img.affine, header=img.header),
os.path.join("/results/final_preds", fname + ".nii.gz"),
)
os.makedirs("/results/final_preds")
preds = sorted(glob(f"/results/predictions*"))
examples = list(zip(*[sorted(glob(f"{p}/*.npy")) for p in preds]))
print("Preparing final predictions")
for e in examples:
    prepare_predictions(e)
print("Finished!")
# # Visualization
#
# Let's visualize the final prediction made on the challenge validation dataset.
# In[9]:
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
from glob import glob
n, z = 5, 75
data = sorted(glob("/results/final_preds/*.nii.gz"))
for i in range(n):
fname = data[i].split("/")[-1].split(".")[0]
print(fname)
img = nib.load(f"/data/BraTS2021_val/images/{fname}.nii.gz").get_fdata().astype(np.float32)
pred = nib.load(data[i]).get_fdata().astype(np.uint8)[:, :, z]
imgs = [img[:, :, z, i] for i in [0, 3]] + [pred]
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12, 12))
for i in range(3):
if i < 2:
ax[i].imshow(imgs[i], cmap='gray')
else:
ax[i].imshow(imgs[i]);
ax[i].axis('off')
plt.tight_layout()
plt.show()
|
TensorFlow/Detection/SSD/models/research/object_detection/metrics | metrics | coco_tools | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for third party pycocotools to be used within object_detection.
Note that nothing in this file is tensorflow related and thus cannot
be called directly as a slim metric, for example.
TODO(jonathanhuang): wrap as a slim metric in metrics.py
Usage example: given a set of images with ids in the list image_ids
and corresponding lists of numpy arrays encoding groundtruth (boxes and classes)
and detections (boxes, scores and classes), where elements of each list
correspond to detections/annotations of a single image,
then evaluation (in multi-class mode) can be invoked as follows:
groundtruth_dict = coco_tools.ExportGroundtruthToCOCO(
image_ids, groundtruth_boxes_list, groundtruth_classes_list,
max_num_classes, output_path=None)
detections_list = coco_tools.ExportDetectionsToCOCO(
image_ids, detection_boxes_list, detection_scores_list,
detection_classes_list, output_path=None)
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
from collections import OrderedDict
import copy
import time
import dllogger
import numpy as np
from pycocotools import coco
from pycocotools import cocoeval
from pycocotools import mask
import tensorflow as tf
from object_detection.utils import json_utils
class COCOWrapper(coco.COCO):
"""Wrapper for the pycocotools COCO class."""
def __init__(self, dataset, detection_type='bbox'):
"""COCOWrapper constructor.
See http://mscoco.org/dataset/#format for a description of the format.
By default, the coco.COCO class constructor reads from a JSON file.
This function duplicates the same behavior but loads from a dictionary,
allowing us to perform evaluation without writing to external storage.
Args:
dataset: a dictionary holding bounding box annotations in the COCO format.
detection_type: type of detections being wrapped. Can be one of ['bbox',
'segmentation']
Raises:
ValueError: if detection_type is unsupported.
"""
supported_detection_types = ['bbox', 'segmentation']
if detection_type not in supported_detection_types:
raise ValueError('Unsupported detection type: {}. '
'Supported values are: {}'.format(
detection_type, supported_detection_types))
self._detection_type = detection_type
coco.COCO.__init__(self)
self.dataset = dataset
self.createIndex()
def LoadAnnotations(self, annotations):
"""Load annotations dictionary into COCO datastructure.
See http://mscoco.org/dataset/#format for a description of the annotations
format. As above, this function replicates the default behavior of the API
but does not require writing to external storage.
Args:
annotations: python list holding object detection results where each
detection is encoded as a dict with required keys ['image_id',
'category_id', 'score'] and one of ['bbox', 'segmentation'] based on
`detection_type`.
Returns:
a coco.COCO datastructure holding object detection annotations results
Raises:
ValueError: if annotations is not a list
ValueError: if annotations do not correspond to the images contained
in self.
"""
results = coco.COCO()
results.dataset['images'] = [img for img in self.dataset['images']]
tf.logging.info('Loading and preparing annotation results...')
tic = time.time()
if not isinstance(annotations, list):
raise ValueError('annotations is not a list of objects')
annotation_img_ids = [ann['image_id'] for ann in annotations]
if (set(annotation_img_ids) != (set(annotation_img_ids)
& set(self.getImgIds()))):
raise ValueError('Results do not correspond to current coco set')
results.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
if self._detection_type == 'bbox':
for idx, ann in enumerate(annotations):
bb = ann['bbox']
ann['area'] = bb[2] * bb[3]
ann['id'] = idx + 1
ann['iscrowd'] = 0
elif self._detection_type == 'segmentation':
for idx, ann in enumerate(annotations):
ann['area'] = mask.area(ann['segmentation'])
ann['bbox'] = mask.toBbox(ann['segmentation'])
ann['id'] = idx + 1
ann['iscrowd'] = 0
tf.logging.info('DONE (t=%0.2fs)', (time.time() - tic))
results.dataset['annotations'] = annotations
results.createIndex()
return results
class COCOEvalWrapper(cocoeval.COCOeval):
"""Wrapper for the pycocotools COCOeval class.
To evaluate, create two objects (groundtruth_dict and detections_list)
using the conventions listed at http://mscoco.org/dataset/#format.
Then call evaluation as follows:
groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
detections = groundtruth.LoadAnnotations(detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections,
agnostic_mode=False)
metrics = evaluator.ComputeMetrics()
"""
def __init__(self, groundtruth=None, detections=None, agnostic_mode=False,
iou_type='bbox'):
"""COCOEvalWrapper constructor.
Note that for the area-based metrics to be meaningful, detection and
groundtruth boxes must be in image coordinates measured in pixels.
Args:
groundtruth: a coco.COCO (or coco_tools.COCOWrapper) object holding
groundtruth annotations
detections: a coco.COCO (or coco_tools.COCOWrapper) object holding
detections
agnostic_mode: boolean (default: False). If True, evaluation ignores
class labels, treating all detections as proposals.
iou_type: IOU type to use for evaluation. Supports `bbox` or `segm`.
"""
cocoeval.COCOeval.__init__(self, groundtruth, detections,
iouType=iou_type)
if agnostic_mode:
self.params.useCats = 0
def GetCategory(self, category_id):
"""Fetches dictionary holding category information given category id.
Args:
category_id: integer id
Returns:
dictionary holding 'id', 'name'.
"""
return self.cocoGt.cats[category_id]
def GetAgnosticMode(self):
"""Returns true if COCO Eval is configured to evaluate in agnostic mode."""
return self.params.useCats == 0
def GetCategoryIdList(self):
"""Returns list of valid category ids."""
return self.params.catIds
def ComputeMetrics(self,
include_metrics_per_category=False,
all_metrics_per_category=False):
"""Computes detection metrics.
Args:
include_metrics_per_category: If True, will include metrics per category.
      all_metrics_per_category: If true, include all the summary metrics for
        each category in per_category_ap. Be careful with setting it to true if
        you have more than a handful of categories, because it will pollute
your mldash.
Returns:
1. summary_metrics: a dictionary holding:
'Precision/mAP': mean average precision over classes averaged over IOU
thresholds ranging from .5 to .95 with .05 increments
'Precision/mAP@.50IOU': mean average precision at 50% IOU
'Precision/mAP@.75IOU': mean average precision at 75% IOU
'Precision/mAP (small)': mean average precision for small objects
(area < 32^2 pixels)
'Precision/mAP (medium)': mean average precision for medium sized
objects (32^2 pixels < area < 96^2 pixels)
'Precision/mAP (large)': mean average precision for large objects
(96^2 pixels < area < 10000^2 pixels)
'Recall/AR@1': average recall with 1 detection
'Recall/AR@10': average recall with 10 detections
'Recall/AR@100': average recall with 100 detections
'Recall/AR@100 (small)': average recall for small objects with 100
detections
'Recall/AR@100 (medium)': average recall for medium objects with 100
detections
'Recall/AR@100 (large)': average recall for large objects with 100
detections
2. per_category_ap: a dictionary holding category specific results with
keys of the form: 'Precision mAP ByCategory/category'
(without the supercategory part if no supercategories exist).
For backward compatibility 'PerformanceByCategory' is included in the
output regardless of all_metrics_per_category.
If evaluating class-agnostic mode, per_category_ap is an empty
dictionary.
Raises:
ValueError: If category_stats does not exist.
"""
self.evaluate()
self.accumulate()
self.summarize()
summary_metrics = OrderedDict([
('Precision/mAP', self.stats[0]),
('Precision/mAP@.50IOU', self.stats[1]),
('Precision/mAP@.75IOU', self.stats[2]),
('Precision/mAP (small)', self.stats[3]),
('Precision/mAP (medium)', self.stats[4]),
('Precision/mAP (large)', self.stats[5]),
('Recall/AR@1', self.stats[6]),
('Recall/AR@10', self.stats[7]),
('Recall/AR@100', self.stats[8]),
('Recall/AR@100 (small)', self.stats[9]),
('Recall/AR@100 (medium)', self.stats[10]),
('Recall/AR@100 (large)', self.stats[11])
])
dllogger.log(step=tuple(), data=summary_metrics)
if not include_metrics_per_category:
return summary_metrics, {}
if not hasattr(self, 'category_stats'):
raise ValueError('Category stats do not exist')
per_category_ap = OrderedDict([])
if self.GetAgnosticMode():
return summary_metrics, per_category_ap
for category_index, category_id in enumerate(self.GetCategoryIdList()):
category = self.GetCategory(category_id)['name']
      # Kept for backward compatibility
per_category_ap['PerformanceByCategory/mAP/{}'.format(
category)] = self.category_stats[0][category_index]
if all_metrics_per_category:
per_category_ap['Precision mAP ByCategory/{}'.format(
category)] = self.category_stats[0][category_index]
per_category_ap['Precision mAP@.50IOU ByCategory/{}'.format(
category)] = self.category_stats[1][category_index]
per_category_ap['Precision mAP@.75IOU ByCategory/{}'.format(
category)] = self.category_stats[2][category_index]
per_category_ap['Precision mAP (small) ByCategory/{}'.format(
category)] = self.category_stats[3][category_index]
per_category_ap['Precision mAP (medium) ByCategory/{}'.format(
category)] = self.category_stats[4][category_index]
per_category_ap['Precision mAP (large) ByCategory/{}'.format(
category)] = self.category_stats[5][category_index]
per_category_ap['Recall AR@1 ByCategory/{}'.format(
category)] = self.category_stats[6][category_index]
per_category_ap['Recall AR@10 ByCategory/{}'.format(
category)] = self.category_stats[7][category_index]
per_category_ap['Recall AR@100 ByCategory/{}'.format(
category)] = self.category_stats[8][category_index]
per_category_ap['Recall AR@100 (small) ByCategory/{}'.format(
category)] = self.category_stats[9][category_index]
per_category_ap['Recall AR@100 (medium) ByCategory/{}'.format(
category)] = self.category_stats[10][category_index]
per_category_ap['Recall AR@100 (large) ByCategory/{}'.format(
category)] = self.category_stats[11][category_index]
return summary_metrics, per_category_ap
def _ConvertBoxToCOCOFormat(box):
"""Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.
This is a utility function for converting from our internal
[ymin, xmin, ymax, xmax] convention to the convention used by the COCO API
i.e., [xmin, ymin, width, height].
Args:
box: a [ymin, xmin, ymax, xmax] numpy array
Returns:
a list of floats representing [xmin, ymin, width, height]
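    For example, box = [10.0, 20.0, 50.0, 80.0] (ymin, xmin, ymax, xmax) is
    converted to [20.0, 10.0, 60.0, 40.0] (xmin, ymin, width, height).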
"""
return [float(box[1]), float(box[0]), float(box[3] - box[1]),
float(box[2] - box[0])]
def _RleCompress(masks):
"""Compresses mask using Run-length encoding provided by pycocotools.
Args:
masks: uint8 numpy array of shape [mask_height, mask_width] with values in
{0, 1}.
Returns:
A pycocotools Run-length encoding of the mask.
"""
return mask.encode(np.asfortranarray(masks))
def ExportSingleImageGroundtruthToCoco(image_id,
next_annotation_id,
category_id_set,
groundtruth_boxes,
groundtruth_classes,
groundtruth_masks=None,
groundtruth_is_crowd=None):
"""Export groundtruth of a single image to COCO format.
This function converts groundtruth detection annotations represented as numpy
arrays to dictionaries that can be ingested by the COCO evaluation API. Note
that the image_ids provided here must match the ones given to
ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in
correspondence - that is: groundtruth_boxes[i, :], and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box.
Args:
image_id: a unique image identifier either of type integer or string.
next_annotation_id: integer specifying the first id to use for the
groundtruth annotations. All annotations are assigned a continuous integer
id starting from this value.
category_id_set: A set of valid class ids. Groundtruth with classes not in
category_id_set are dropped.
groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
groundtruth_masks: optional uint8 numpy array of shape [num_detections,
image_height, image_width] containing detection_masks.
groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes]
indicating whether groundtruth boxes are crowd.
Returns:
a list of groundtruth annotations for a single image in the COCO format.
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
if len(groundtruth_classes.shape) != 1:
raise ValueError('groundtruth_classes is '
'expected to be of rank 1.')
if len(groundtruth_boxes.shape) != 2:
raise ValueError('groundtruth_boxes is expected to be of '
'rank 2.')
if groundtruth_boxes.shape[1] != 4:
raise ValueError('groundtruth_boxes should have '
'shape[1] == 4.')
num_boxes = groundtruth_classes.shape[0]
if num_boxes != groundtruth_boxes.shape[0]:
raise ValueError('Corresponding entries in groundtruth_classes, '
'and groundtruth_boxes should have '
                     'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (
groundtruth_classes.shape[0],
groundtruth_boxes.shape[0], image_id))
has_is_crowd = groundtruth_is_crowd is not None
if has_is_crowd and len(groundtruth_is_crowd.shape) != 1:
raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')
groundtruth_list = []
for i in range(num_boxes):
if groundtruth_classes[i] in category_id_set:
iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0
export_dict = {
'id':
next_annotation_id + i,
'image_id':
image_id,
'category_id':
int(groundtruth_classes[i]),
'bbox':
list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
'area':
float((groundtruth_boxes[i, 2] - groundtruth_boxes[i, 0]) *
(groundtruth_boxes[i, 3] - groundtruth_boxes[i, 1])),
'iscrowd':
iscrowd
}
if groundtruth_masks is not None:
export_dict['segmentation'] = _RleCompress(groundtruth_masks[i])
groundtruth_list.append(export_dict)
return groundtruth_list
def ExportGroundtruthToCOCO(image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=None):
"""Export groundtruth detection annotations in numpy arrays to COCO API.
This function converts a set of groundtruth detection annotations represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are three lists: image ids for each groundtruth image,
groundtruth boxes for each image and groundtruth classes respectively.
Note that the image_ids provided here must match the ones given to the
ExportDetectionsToCOCO function in order for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: image_id[i], groundtruth_boxes[i, :] and
groundtruth_classes[i] are associated with the same groundtruth annotation.
In the exported result, "area" fields are always set to the area of the
groundtruth bounding box and "iscrowd" fields are always set to 0.
TODO(jonathanhuang): pass in "iscrowd" array for evaluating on COCO dataset.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
groundtruth_boxes: list of numpy arrays with shape [num_gt_boxes, 4]
(note that num_gt_boxes can be different for each entry in the list)
groundtruth_classes: list of numpy arrays (int) with shape [num_gt_boxes]
(note that num_gt_boxes can be different for each entry in the list)
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
output_path: (optional) path for exporting result to JSON
Returns:
dictionary that can be read by COCO API
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
category_id_set = set([cat['id'] for cat in categories])
groundtruth_export_list = []
image_export_list = []
if not len(image_ids) == len(groundtruth_boxes) == len(groundtruth_classes):
raise ValueError('Input lists must have the same length')
# For reasons internal to the COCO API, it is important that annotation ids
# are not equal to zero; we thus start counting from 1.
annotation_id = 1
for image_id, boxes, classes in zip(image_ids, groundtruth_boxes,
groundtruth_classes):
image_export_list.append({'id': image_id})
groundtruth_export_list.extend(ExportSingleImageGroundtruthToCoco(
image_id,
annotation_id,
category_id_set,
boxes,
classes))
num_boxes = classes.shape[0]
annotation_id += num_boxes
groundtruth_dict = {
'annotations': groundtruth_export_list,
'images': image_export_list,
'categories': categories
}
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(groundtruth_dict, fid, float_digits=4, indent=2)
return groundtruth_dict
def ExportSingleImageDetectionBoxesToCoco(image_id,
category_id_set,
detection_boxes,
detection_scores,
detection_classes):
"""Export detections of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. Note that the image_ids
provided here must match the ones given to the
  ExportSingleImageGroundtruthToCoco. We assume that boxes and classes are in
correspondence - that is: boxes[i, :], and classes[i]
are associated with the same groundtruth annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_boxes: float numpy array of shape [num_detections, 4] containing
detection boxes.
detection_scores: float numpy array of shape [num_detections] containing
      scores for the detection boxes.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection boxes.
Returns:
a list of detection annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_boxes, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'are expected to be of rank 1.')
if len(detection_boxes.shape) != 2:
raise ValueError('All entries in detection_boxes expected to be of '
'rank 2.')
if detection_boxes.shape[1] != 4:
raise ValueError('All entries in detection_boxes should have '
'shape[1] == 4.')
num_boxes = detection_classes.shape[0]
if not num_boxes == detection_boxes.shape[0] == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension). '
'Classes shape: %d. Boxes shape: %d. '
'Scores shape: %d' % (
detection_classes.shape[0], detection_boxes.shape[0],
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': image_id,
'category_id': int(detection_classes[i]),
'bbox': list(_ConvertBoxToCOCOFormat(detection_boxes[i, :])),
'score': float(detection_scores[i])
})
return detections_list
def ExportSingleImageDetectionMasksToCoco(image_id,
category_id_set,
detection_masks,
detection_scores,
detection_classes):
"""Export detection masks of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. We assume that
detection_masks, detection_scores, and detection_classes are in correspondence
- that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]
are associated with the same annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
detection_scores: float numpy array of shape [num_detections] containing
scores for detection masks.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection masks.
Returns:
a list of detection mask annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_masks, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'are expected to be of rank 1.')
num_boxes = detection_classes.shape[0]
if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_masks should have '
'compatible lengths and shapes '
'Classes length: %d. Masks length: %d. '
'Scores length: %d' % (
detection_classes.shape[0], len(detection_masks),
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': image_id,
'category_id': int(detection_classes[i]),
'segmentation': _RleCompress(detection_masks[i]),
'score': float(detection_scores[i])
})
return detections_list
def ExportDetectionsToCOCO(image_ids,
detection_boxes,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export detection annotations in numpy arrays to COCO API.
This function converts a set of predicted detections represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of boxes, scores and
classes, respectively, corresponding to each image for which detections
have been produced. Note that the image_ids provided here must
match the ones given to the ExportGroundtruthToCOCO function in order
for evaluation to work properly.
We assume that for each image, boxes, scores and classes are in
correspondence --- that is: detection_boxes[i, :], detection_scores[i] and
detection_classes[i] are associated with the same detection.
Args:
image_ids: a list of unique image identifier either of type integer or
string.
detection_boxes: list of numpy arrays with shape [num_detection_boxes, 4]
detection_scores: list of numpy arrays (float) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection_boxes]. Note that num_detection_boxes can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'bbox', 'score'].
Raises:
ValueError: if (1) detection_boxes and detection_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers.
"""
category_id_set = set([cat['id'] for cat in categories])
detections_export_list = []
if not (len(image_ids) == len(detection_boxes) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
for image_id, boxes, scores, classes in zip(image_ids, detection_boxes,
detection_scores,
detection_classes):
detections_export_list.extend(ExportSingleImageDetectionBoxesToCoco(
image_id,
category_id_set,
boxes,
scores,
classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(detections_export_list, fid, float_digits=4, indent=2)
return detections_export_list
def ExportSegmentsToCOCO(image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Export segmentation masks in numpy arrays to COCO API.
This function converts a set of predicted instance masks represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of segments, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
  Note that this function is recommended for small datasets.
  For large datasets, it should be used together with a merge function
  (e.g. in map-reduce); otherwise the memory consumption is large.
We assume that for each image, masks, scores and classes are in
correspondence --- that is: detection_masks[i, :, :, :], detection_scores[i]
and detection_classes[i] are associated with the same detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_masks: list of numpy arrays with shape [num_detection, h, w, 1]
and type uint8. The height and width should match the shape of
corresponding image.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'segmentation', 'score'].
Raises:
ValueError: if detection_masks and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_masks) == len(detection_scores) ==
len(detection_classes)):
raise ValueError('Input lists must have the same length')
segment_export_list = []
for image_id, masks, scores, classes in zip(image_ids, detection_masks,
detection_scores,
detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'are expected to be of rank 1.')
if len(masks.shape) != 4:
raise ValueError('All entries in masks expected to be of '
'rank 4. Given {}'.format(masks.shape))
num_boxes = classes.shape[0]
if not num_boxes == masks.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in segment_classes, '
'detection_scores and detection_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
segment_export_list.extend(ExportSingleImageDetectionMasksToCoco(
image_id, category_id_set, np.squeeze(masks, axis=3), scores, classes))
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(segment_export_list, fid, float_digits=4, indent=2)
return segment_export_list
def ExportKeypointsToCOCO(image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=None):
"""Exports keypoints in numpy arrays to COCO API.
This function converts a set of predicted keypoints represented
as numpy arrays to dictionaries that can be ingested by the COCO API.
Inputs to this function are lists, consisting of keypoints, scores and
classes, respectively, corresponding to each image for which detections
have been produced.
We assume that for each image, keypoints, scores and classes are in
correspondence --- that is: detection_keypoints[i, :, :, :],
detection_scores[i] and detection_classes[i] are associated with the same
detection.
Args:
image_ids: list of image ids (typically ints or strings)
detection_keypoints: list of numpy arrays with shape
[num_detection, num_keypoints, 2] and type float32 in absolute
x-y coordinates.
detection_scores: list of numpy arrays (float) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
detection_classes: list of numpy arrays (int) with shape
[num_detection]. Note that num_detection can be different
for each entry in the list.
categories: a list of dictionaries representing all possible categories.
Each dict in this list must have an integer 'id' key uniquely identifying
this category and an integer 'num_keypoints' key specifying the number of
keypoints the category has.
output_path: (optional) path for exporting result to JSON
Returns:
list of dictionaries that can be read by COCO API, where each entry
corresponds to a single detection and has keys from:
['image_id', 'category_id', 'keypoints', 'score'].
Raises:
ValueError: if detection_keypoints and detection_classes do not have the
right lengths or if each of the elements inside these lists do not
have the correct shapes.
"""
if not (len(image_ids) == len(detection_keypoints) ==
len(detection_scores) == len(detection_classes)):
raise ValueError('Input lists must have the same length')
keypoints_export_list = []
for image_id, keypoints, scores, classes in zip(
image_ids, detection_keypoints, detection_scores, detection_classes):
if len(classes.shape) != 1 or len(scores.shape) != 1:
      raise ValueError('All entries in detection_classes and detection_scores '
                       'are expected to be of rank 1.')
if len(keypoints.shape) != 3:
raise ValueError('All entries in keypoints expected to be of '
'rank 3. Given {}'.format(keypoints.shape))
num_boxes = classes.shape[0]
if not num_boxes == keypoints.shape[0] == scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_keypoints, and detection_scores should have '
'compatible shapes (i.e., agree on the 0th dimension).')
category_id_set = set([cat['id'] for cat in categories])
category_id_to_num_keypoints_map = {
cat['id']: cat['num_keypoints'] for cat in categories
if 'num_keypoints' in cat}
for i in range(num_boxes):
if classes[i] not in category_id_set:
raise ValueError('class id should be in category_id_set\n')
if classes[i] in category_id_to_num_keypoints_map:
num_keypoints = category_id_to_num_keypoints_map[classes[i]]
# Adds extra ones to indicate the visibility for each keypoint as is
# recommended by MSCOCO.
instance_keypoints = np.concatenate(
[keypoints[i, 0:num_keypoints, :],
np.expand_dims(np.ones(num_keypoints), axis=1)],
axis=1).astype(int)
instance_keypoints = instance_keypoints.flatten().tolist()
keypoints_export_list.append({
'image_id': image_id,
'category_id': int(classes[i]),
'keypoints': instance_keypoints,
'score': float(scores[i])
})
if output_path:
with tf.gfile.GFile(output_path, 'w') as fid:
json_utils.Dump(keypoints_export_list, fid, float_digits=4, indent=2)
return keypoints_export_list
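# --- Illustrative usage sketch (not part of the original API) ----------------
# Made-up example of the inputs ExportKeypointsToCOCO expects: per-image numpy
# arrays of absolute x-y keypoints, scores and classes, plus categories that
# carry a 'num_keypoints' field. Each exported keypoint triplet is [x, y, 1],
# with the trailing 1 marking the keypoint as visible.
def _ExampleExportKeypoints():
  example_keypoints = [np.array([[[10.0, 20.0], [30.0, 40.0]]], dtype=np.float32)]
  example_scores = [np.array([0.8], dtype=np.float32)]
  example_classes = [np.array([1], dtype=np.int32)]
  example_categories = [{'id': 1, 'name': 'person', 'num_keypoints': 2}]
  return ExportKeypointsToCOCO(
      image_ids=[1],
      detection_keypoints=example_keypoints,
      detection_scores=example_scores,
      detection_classes=example_classes,
      categories=example_categories)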
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton | triton | calculate_metrics | #!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Using the `calculate_metrics.py` script, you can obtain model accuracy/error metrics using a user-defined `MetricsCalculator` class.
Data provided to the `MetricsCalculator` are obtained from dump files
stored in the directory pointed to by the `--dump-dir` argument.
These files are prepared by the `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts.
Output data is stored in the csv file pointed to by the `--csv` argument.
Example call:
```shell script
python ./triton/calculate_metrics.py \
--dump-dir /results/dump_triton \
--csv /results/accuracy_results.csv \
--metrics metrics.py \
--metric-class-param1 value
```
"""
import argparse
import csv
import logging
import string
from pathlib import Path
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file
from .deployment_toolkit.dump import JsonDumpReader
LOGGER = logging.getLogger("calculate_metrics")
TOTAL_COLUMN_NAME = "_total_"
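# --- Illustrative sketch of a user-provided MetricsCalculator ----------------
# The module passed via --metrics must expose a class named MetricsCalculator;
# the only interface this script relies on is an update(ids=, x=, y_pred=,
# y_real=) call and a `metrics` property returning a dict (see main() below).
# A real implementation would derive from BaseMetricsCalculator and live in its
# own file; the metric name "mae" and the assumption that y_pred / y_real are
# dicts of numpy arrays are illustrative only.
class _ExampleMAECalculator:
    def __init__(self):
        self._abs_err_sum = 0.0
        self._count = 0
    def update(self, *, ids, x, y_pred, y_real):
        for name, pred in y_pred.items():
            err = abs(pred - y_real[name])
            self._abs_err_sum += float(err.sum())
            self._count += int(err.size)
    @property
    def metrics(self):
        return {"mae": self._abs_err_sum / max(self._count, 1)}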
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False)
parser.add_argument("--metrics", help="Path to python module containing metrics calculator", required=True)
parser.add_argument("--csv", help="Path to csv file", required=True)
parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True)
args, *_ = parser.parse_known_args()
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
ArgParserGenerator(MetricsCalculator).update_argparser(parser)
args = parser.parse_args()
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args)
reader = JsonDumpReader(args.dump_dir)
for ids, x, y_true, y_pred in reader.iterate_over(["ids", "inputs", "labels", "outputs"]):
ids = list(ids["ids"]) if ids is not None else None
metrics_calculator.update(ids=ids, x=x, y_pred=y_pred, y_real=y_true)
metrics = metrics_calculator.metrics
metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])]
if metric_names_with_space:
raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}")
LOGGER.info("Results:")
for key, value in metrics.items():
LOGGER.info(f" {key}: {value}")
csv_path = Path(args.csv)
csv_path.parent.mkdir(parents=True, exist_ok=True)
with csv_path.open("w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys()))
writer.writeheader()
writer.writerow(metrics)
if __name__ == "__main__":
main()
|
PyTorch/DrugDiscovery/MoFlow/moflow/model | model | coupling | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from typing import Tuple
import torch
import torch.nn as nn
from torch.nn.functional import logsigmoid
from moflow.model.basic import GraphConv
def sigmoid_inverse(x):
"""Calculates 1/sigmoid(x) in a more numerically stable way"""
return 1 + torch.exp(-x)
class AffineCoupling(nn.Module): # delete
def __init__(self, in_channel, hidden_channels, mask_swap=False): # filter_size=512, --> hidden_channels =(512, 512)
super(AffineCoupling, self).__init__()
self.mask_swap=mask_swap
# self.norms_in = nn.ModuleList()
last_h = in_channel // 2
vh = tuple(hidden_channels)
layers = []
for h in vh:
layers.append(nn.Conv2d(last_h, h, kernel_size=3, padding=1))
layers.append(nn.BatchNorm2d(h))
layers.append(nn.ReLU(inplace=True))
last_h = h
layers.append(nn.Conv2d(last_h, in_channel, kernel_size=3, padding=1))
self.layers = nn.Sequential(*layers)
def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
in_a, in_b = input.chunk(2, 1) # (2,12,32,32) --> (2,6,32,32), (2,6,32,32)
if self.mask_swap:
in_a, in_b = in_b, in_a
s_logits, t = self._s_t_function(in_a)
s = torch.sigmoid(s_logits)
out_b = (in_b + t) * s
logdet = torch.sum(logsigmoid(s_logits).reshape(input.shape[0], -1), 1)
if self.mask_swap:
result = torch.cat([out_b, in_a], 1)
else:
result = torch.cat([in_a, out_b], 1)
return result, logdet
@torch.jit.export
def reverse(self, output: torch.Tensor) -> torch.Tensor:
out_a, out_b = output.chunk(2, 1)
if self.mask_swap:
out_a, out_b = out_b, out_a
s_logits, t = self._s_t_function(out_a)
s_inverse = sigmoid_inverse(s_logits)
in_b = out_b * s_inverse - t
if self.mask_swap:
result = torch.cat([in_b, out_a], 1)
else:
result = torch.cat([out_a, in_b], 1)
return result
def _s_t_function(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
h = self.layers(x)
s_logits, t = h.chunk(2, 1)
return s_logits, t
class ConvCouplingBlock(nn.Module):
def __init__(self, in_dim: int, out_dim: int, n_node: int) -> None:
super().__init__()
self.graph_conv = GraphConv(in_dim, out_dim, n_node)
self.bn = nn.BatchNorm2d(n_node)
self.relu = nn.ReLU(inplace=True)
def forward(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
adj, nodes = graph
h = self.graph_conv(graph)
h = h.to(memory_format=torch.channels_last)
h = self.bn(h)
h = self.relu(h)
return adj, h
class LinCouplingBlock(nn.Module):
def __init__(self, in_dim: int, out_dim: int, n_node: int) -> None:
super().__init__()
self.lin = nn.Linear(in_dim, out_dim)
self.bn = nn.BatchNorm2d(n_node)
self.relu = nn.ReLU(inplace=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self.lin(x)
h = h.to(memory_format=torch.channels_last)
h = self.bn(h)
h = self.relu(h)
return h
class GraphAffineCoupling(nn.Module):
def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row):
super(GraphAffineCoupling, self).__init__()
self.n_node = n_node
self.in_dim = in_dim
self.hidden_dim_dict = hidden_dim_dict
self.masked_row = masked_row
self.hidden_dim_gnn = hidden_dim_dict['gnn']
self.hidden_dim_linear = hidden_dim_dict['linear']
conv_layers = []
last_dim = in_dim
for out_dim in self.hidden_dim_gnn:
conv_layers.append(ConvCouplingBlock(last_dim, out_dim, n_node))
last_dim = out_dim
self.net_conv = nn.ModuleList(conv_layers)
lin_layers = []
for out_dim in self.hidden_dim_linear:
lin_layers.append(LinCouplingBlock(last_dim, out_dim, n_node))
last_dim = out_dim
lin_layers.append(nn.Linear(last_dim, in_dim*2))
self.net_lin = nn.Sequential(*lin_layers)
mask = torch.ones(n_node, in_dim)
mask[masked_row, :] = 0 # masked_row are kept same, and used for _s_t for updating the left rows
self.register_buffer('mask', mask)
def forward(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
adj, input = graph
masked_x = self.mask * input
masked_x_sq = masked_x.unsqueeze(2)
s_logits, t = self._s_t_function((adj, masked_x_sq))
s = torch.sigmoid(s_logits)
out = masked_x + (1-self.mask) * (input + t) * s
logdet = torch.sum(logsigmoid(s_logits).reshape(input.shape[0], -1), 1)
return out, logdet
@torch.jit.export
def reverse(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
adj, output = graph
masked_y = self.mask * output
masked_y_sq = masked_y.unsqueeze(2)
s_logits, t = self._s_t_function((adj, masked_y_sq))
s_inverse = sigmoid_inverse(s_logits)
input = masked_y + (1 - self.mask) * (output * s_inverse - t)
return input
def _s_t_function(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
for l in self.net_conv:
graph = l(graph)
adj, h = graph
h = self.net_lin(h)
h = h.squeeze(2)
s_logits, t = h.chunk(2, dim=-1)
return s_logits, t
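# --- Illustrative sanity check (not part of the original model code) ---------
# Minimal sketch showing that AffineCoupling.reverse inverts forward up to
# numerical precision. The channel count, hidden sizes and tensor shape are
# arbitrary example values, not the ones used by MoFlow itself.
def _example_affine_coupling_roundtrip():
    torch.manual_seed(0)
    coupling = AffineCoupling(in_channel=12, hidden_channels=(64, 64)).eval()
    x = torch.randn(2, 12, 8, 8)
    with torch.no_grad():
        y, logdet = coupling(x)
        x_reconstructed = coupling.reverse(y)
    return torch.allclose(x, x_reconstructed, atol=1e-5)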
|
TensorFlow/Detection/SSD/models/research/object_detection/g3doc | g3doc | defining_your_own_model | # So you want to create a new model!
In this section, we discuss some of the abstractions that we use
for defining detection models. If you would like to define a new model
architecture for detection and use it in the Tensorflow Detection API,
then this section should also serve as a high level guide to the files that you
will need to edit to get your new model working.
## DetectionModels (`object_detection/core/model.py`)
In order to be trained, evaluated, and exported for serving using our
provided binaries, all models under the Tensorflow Object Detection API must
implement the `DetectionModel` interface (see the full definition in `object_detection/core/model.py`). In particular,
each of these models is responsible for implementing 5 functions:
* `preprocess`: Run any preprocessing (e.g., scaling/shifting/reshaping) of
input values that is necessary prior to running the detector on an input
image.
* `predict`: Produce “raw” prediction tensors that can be passed to loss or
postprocess functions.
* `postprocess`: Convert predicted output tensors to final detections.
* `loss`: Compute scalar loss tensors with respect to provided groundtruth.
* `restore`: Load a checkpoint into the Tensorflow graph.
Given a `DetectionModel` at training time, we pass each image batch through
the following sequence of functions to compute a loss which can be optimized via
SGD:
```
inputs (images tensor) -> preprocess -> predict -> loss -> outputs (loss tensor)
```
And at eval time, we pass each image batch through the following sequence of
functions to produce a set of detections:
```
inputs (images tensor) -> preprocess -> predict -> postprocess ->
outputs (boxes tensor, scores tensor, classes tensor, num_detections tensor)
```
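To make the call order concrete, here is a small, hypothetical stand-in written
against this sequence (it is *not* a real `DetectionModel` subclass --- the actual
interface in `object_detection/core/model.py` passes additional arguments such as
true image shapes, which this toy omits):
```
import tensorflow as tf
class ToyDetectionModel(object):
  """Toy stand-in that only mirrors the call order described above."""
  def preprocess(self, inputs):
    return tf.image.resize_images(inputs, [320, 320])
  def predict(self, preprocessed_inputs):
    return {'raw_scores': tf.reduce_mean(preprocessed_inputs, axis=[1, 2])}
  def loss(self, prediction_dict):
    return {'toy_loss': tf.reduce_sum(prediction_dict['raw_scores'])}
  def postprocess(self, prediction_dict):
    return {'detection_scores': tf.nn.softmax(prediction_dict['raw_scores'])}
model = ToyDetectionModel()
images = tf.placeholder(tf.float32, shape=[None, 300, 300, 3])
# Training-time wiring: preprocess -> predict -> loss
losses = model.loss(model.predict(model.preprocess(images)))
# Eval-time wiring: preprocess -> predict -> postprocess
detections = model.postprocess(model.predict(model.preprocess(images)))
```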
Some conventions to be aware of:
* `DetectionModel`s should make no assumptions about the input size or aspect
ratio --- they are responsible for doing any resize/reshaping necessary
(see docstring for the `preprocess` function).
* Output classes are always integers in the range `[0, num_classes)`.
Any mapping of these integers to semantic labels is to be handled outside
of this class. We never explicitly emit a “background class” --- thus 0 is
the first non-background class and any logic of predicting and removing
implicit background classes must be handled internally by the implementation.
* Detected boxes are to be interpreted as being in
`[y_min, x_min, y_max, x_max]` format and normalized relative to the
image window.
* We do not specifically assume any kind of probabilistic interpretation of the
scores --- the only important thing is their relative ordering. Thus
implementations of the postprocess function are free to output logits,
probabilities, calibrated probabilities, or anything else.
## Defining a new Faster R-CNN or SSD Feature Extractor
In most cases, you probably will not implement a `DetectionModel` from scratch
--- instead you might create a new feature extractor to be used by one of the
SSD or Faster R-CNN meta-architectures. (We think of meta-architectures as
classes that define entire families of models using the `DetectionModel`
abstraction).
Note: For the following discussion to make sense, we recommend first becoming
familiar with the [Faster R-CNN](https://arxiv.org/abs/1506.01497) paper.
Let’s now imagine that you have invented a brand new network architecture
(say, “InceptionV100”) for classification and want to see how InceptionV100
would behave as a feature extractor for detection (say, with Faster R-CNN).
A similar procedure would hold for SSD models, but we’ll discuss Faster R-CNN.
To use InceptionV100, we will have to define a new
`FasterRCNNFeatureExtractor` and pass it to our `FasterRCNNMetaArch`
constructor as input. See
`object_detection/meta_architectures/faster_rcnn_meta_arch.py` for definitions
of `FasterRCNNFeatureExtractor` and `FasterRCNNMetaArch`, respectively.
A `FasterRCNNFeatureExtractor` must define a few
functions:
* `preprocess`: Run any preprocessing of input values that is necessary prior
to running the detector on an input image.
* `_extract_proposal_features`: Extract first stage Region Proposal Network
(RPN) features.
* `_extract_box_classifier_features`: Extract second stage Box Classifier
features.
* `restore_from_classification_checkpoint_fn`: Load a checkpoint into the
Tensorflow graph.
See the `object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py`
definition as one example. Some remarks:
* We typically initialize the weights of this feature extractor
using those from the
[Slim Resnet-101 classification checkpoint](https://github.com/tensorflow/models/tree/master/research/slim#pre-trained-models),
and we know
that images were preprocessed when training this checkpoint
by subtracting a channel mean from each input
image. Thus, we implement the preprocess function to replicate the same
channel mean subtraction behavior.
* The “full” resnet classification network defined in slim is cut into two
parts --- all but the last “resnet block” is put into the
`_extract_proposal_features` function and the final block is separately
defined in the `_extract_box_classifier_features` function. In general,
some experimentation may be required to decide on an optimal layer at
which to “cut” your feature extractor into these two pieces for Faster R-CNN.
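To make this concrete, a new feature extractor for the hypothetical InceptionV100
might start from a skeleton like the one below. The method bodies are placeholders,
and the constructor plus `restore_from_classification_checkpoint_fn` are omitted
for brevity --- check `faster_rcnn_meta_arch.py` for the exact base class interface:
```
from object_detection.meta_architectures import faster_rcnn_meta_arch
class FasterRCNNInceptionV100FeatureExtractor(
    faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
  """Skeleton only --- fill in the InceptionV100-specific pieces."""
  def preprocess(self, resized_inputs):
    # Replicate whatever preprocessing the classification checkpoint used,
    # e.g. channel mean subtraction or scaling to [-1, 1].
    return (2.0 / 255.0) * resized_inputs - 1.0
  def _extract_proposal_features(self, preprocessed_inputs, scope):
    # Run all but the last block of InceptionV100 and return the RPN features.
    raise NotImplementedError('Cut the network here for first stage features.')
  def _extract_box_classifier_features(self, proposal_feature_maps, scope):
    # Run the final block on the cropped proposal feature maps.
    raise NotImplementedError('Run the final block here.')
```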
## Register your model for configuration
Assuming that your new feature extractor does not require nonstandard
configuration, you will want to ideally be able to simply change the
“feature_extractor.type” fields in your configuration protos to point to a
new feature extractor. In order for our API to know how to understand this
new type though, you will first have to register your new feature
extractor with the model builder (`object_detection/builders/model_builder.py`),
whose job is to create models from config protos.
Registration is simple --- just add a pointer to the new Feature Extractor
class that you have defined in one of the SSD or Faster R-CNN Feature
Extractor Class maps at the top of the
`object_detection/builders/model_builder.py` file.
We recommend adding a test in `object_detection/builders/model_builder_test.py`
to make sure that parsing your proto will work as expected.
## Taking your new model for a spin
After registration you are ready to go with your model! Some final tips:
* To save time debugging, try running your configuration file locally first
(both training and evaluation).
* Do a sweep of learning rates to figure out which learning rate is best
for your model.
* A small but often important detail: you may find it necessary to disable
batchnorm training (that is, load the batch norm parameters from the
classification checkpoint, but do not update them during gradient descent).
|
TensorFlow/Detection/SSD/models/research/object_detection/data | data | pet_label_map | item {
id: 1
name: 'Abyssinian'
}
item {
id: 2
name: 'american_bulldog'
}
item {
id: 3
name: 'american_pit_bull_terrier'
}
item {
id: 4
name: 'basset_hound'
}
item {
id: 5
name: 'beagle'
}
item {
id: 6
name: 'Bengal'
}
item {
id: 7
name: 'Birman'
}
item {
id: 8
name: 'Bombay'
}
item {
id: 9
name: 'boxer'
}
item {
id: 10
name: 'British_Shorthair'
}
item {
id: 11
name: 'chihuahua'
}
item {
id: 12
name: 'Egyptian_Mau'
}
item {
id: 13
name: 'english_cocker_spaniel'
}
item {
id: 14
name: 'english_setter'
}
item {
id: 15
name: 'german_shorthaired'
}
item {
id: 16
name: 'great_pyrenees'
}
item {
id: 17
name: 'havanese'
}
item {
id: 18
name: 'japanese_chin'
}
item {
id: 19
name: 'keeshond'
}
item {
id: 20
name: 'leonberger'
}
item {
id: 21
name: 'Maine_Coon'
}
item {
id: 22
name: 'miniature_pinscher'
}
item {
id: 23
name: 'newfoundland'
}
item {
id: 24
name: 'Persian'
}
item {
id: 25
name: 'pomeranian'
}
item {
id: 26
name: 'pug'
}
item {
id: 27
name: 'Ragdoll'
}
item {
id: 28
name: 'Russian_Blue'
}
item {
id: 29
name: 'saint_bernard'
}
item {
id: 30
name: 'samoyed'
}
item {
id: 31
name: 'scottish_terrier'
}
item {
id: 32
name: 'shiba_inu'
}
item {
id: 33
name: 'Siamese'
}
item {
id: 34
name: 'Sphynx'
}
item {
id: 35
name: 'staffordshire_bull_terrier'
}
item {
id: 36
name: 'wheaten_terrier'
}
item {
id: 37
name: 'yorkshire_terrier'
}
|
PyTorch/Detection/Efficientdet/effdet/layers | layers | nms_layer | """ PyTorch Soft-NMS
This code was adapted from a PR for detectron2 submitted by https://github.com/alekseynp
https://github.com/facebookresearch/detectron2/pull/1183/files
Detectron2 is licensed Apache 2.0, Copyright Facebook Inc.
"""
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from typing import List
import time
import effdet_ext._C as _C
def pairwise_iou(boxes1, boxes2) -> torch.Tensor:
"""
Given two lists of boxes of size N and M,
compute the IoU (intersection over union)
between __all__ N x M pairs of boxes.
    The box order must be (xmin, ymin, xmax, ymax, area), i.e. each box also
    carries its precomputed area in the last column.
    Args:
        boxes1, boxes2 (Tensor[N, 5], Tensor[M, 5]): two sets of boxes containing N and M boxes, respectively.
Returns:
Tensor: IoU, sized [N,M].
"""
area1 = boxes1[:, 4] # [N,]
area2 = boxes2[:, 4] # [M,]
width_height = torch.min(boxes1[:, None, 2:4], boxes2[:, 2:4]) - torch.max(
boxes1[:, None, :2], boxes2[:, :2]
) # [N,M,2]
width_height.clamp_(min=0) # [N,M,2]
inter = width_height.prod(dim=2) # [N,M]
# handle empty boxes
iou = torch.where(
inter > 0,
inter / (area1[:, None] + area2 - inter),
torch.zeros(1, dtype=inter.dtype, device=inter.device),
)
return iou
def soft_nms(
boxes,
scores,
method_gaussian: bool = True,
sigma: float = 0.5,
iou_threshold: float = .5,
score_threshold: float = 0.005
):
"""
Soft non-max suppression algorithm.
    Implementation of [Soft-NMS -- Improving Object Detection With One Line of Code]
(https://arxiv.org/abs/1704.04503)
Args:
        boxes (Tensor[N, 4]):
            boxes where NMS will be performed,
            in (x1, y1, x2, y2) format
        scores (Tensor[N]):
            scores for each one of the boxes
method_gaussian (bool): use gaussian method if True, otherwise linear
sigma (float):
parameter for Gaussian penalty function
iou_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
score_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
# st = time.perf_counter()
device = boxes.device
boxes_remain = boxes.clone()
scores_remain = scores.clone()
num_elem = scores_remain.size()[0]
idxs = torch.arange(num_elem)
idxs_out = torch.zeros(num_elem, dtype=torch.int64, device=device)
scores_out = torch.zeros(num_elem, dtype=torch.float32, device=device)
area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
boxes_remain = torch.cat((boxes_remain, area.unsqueeze(1)), dim=1) # [N, 5] BS, x1, y1, x2, y2, area
count: int = 0
# print("[SOFTMAX] before loop starts in softnms {}".format(time.perf_counter() - st))
while scores_remain.numel() > 0:
# st1 = time.perf_counter()
top_idx = 0 # torch.argmax(scores_remain)
idxs_out[count] = idxs[top_idx]
scores_out[count] = scores_remain[top_idx]
count += 1
top_box = boxes_remain[top_idx]
ious = pairwise_iou(top_box.unsqueeze(0), boxes_remain)[0]
# st2 = time.perf_counter()
# print("[SOFTMAX] Before gaussian in softnms {}".format(st2 - st1))
if method_gaussian:
decay = torch.exp(-torch.pow(ious, 2) / sigma)
else:
decay = torch.ones_like(ious)
decay_mask = ious > iou_threshold
decay[decay_mask] = 1 - ious[decay_mask]
# st3 = time.perf_counter()
# print("[SOFTMAX] Gaussian in softnms {}".format(st3 - st2))
scores_remain *= decay
keep = scores_remain > score_threshold
keep[top_idx] = torch.tensor(False, device=device)
boxes_remain = boxes_remain[keep]
scores_remain = scores_remain[keep]
idxs = idxs[keep]
# st4 = time.perf_counter()
# print("[SOFTMAX] Remaining in softnms {}".format(st4 - st3))
# print("[SOFTMAX] Entire loop takes in softnms {}".format(st4 - st1))
# st5 = time.perf_counter()
# print("[SOFTMAX] Remaining in softnms {}".format(st5 - st))
return idxs_out[:count], scores_out[:count]
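# --- Illustrative usage sketch (not part of the original module) -------------
# Toy call to soft_nms on two heavily overlapping boxes and one separate box.
# Note that this implementation always takes the first remaining box as the
# current top detection (top_idx = 0), so the input scores are assumed to be
# sorted in descending order. All values below are made up.
def _example_soft_nms():
    boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                          [1.0, 1.0, 10.0, 10.0],
                          [50.0, 50.0, 60.0, 60.0]])
    scores = torch.tensor([0.9, 0.8, 0.7])
    keep_idx, keep_scores = soft_nms(
        boxes, scores, method_gaussian=True, sigma=0.5, score_threshold=0.05)
    return keep_idx, keep_scores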
def batched_nms(
boxes, scores, idxs,
iou_threshold: float = .5,):
if boxes.numel() == 0:
return (
torch.empty((0,), dtype=torch.int64, device=boxes.device),
torch.empty((0,), dtype=torch.float32, device=scores.device),
)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
return _C.nms(
boxes_for_nms, scores, iou_threshold
)
def batched_soft_nms(
boxes, scores, idxs,
method_gaussian: bool = True,
sigma: float = 0.5,
iou_threshold: float = .5,
score_threshold: float = 0.001):
"""
Performs soft non-maximum suppression in a batched fashion.
Each index value correspond to a category, and NMS
will not be applied between elements of different categories.
Args:
boxes (Tensor[N, 4]):
boxes where NMS will be performed. They
are expected to be in (x1, y1, x2, y2) format
scores (Tensor[N]):
scores for each one of the boxes
idxs (Tensor[N]):
indices of the categories for each one of the boxes.
        method_gaussian (bool):
            if True, use the Gaussian penalty from the paper, otherwise the
            linear one; standard "hard" NMS is available separately via batched_nms
sigma (float):
parameter for Gaussian penalty function
iou_threshold (float):
iou threshold for applying linear decay. Nt from the paper
re-used as threshold for standard "hard" nms
score_threshold (float):
boxes with scores below this threshold are pruned at each iteration.
Dramatically reduces computation time. Authors use values in [10e-4, 10e-2]
Returns:
tuple(Tensor, Tensor):
[0]: int64 tensor with the indices of the elements that have been kept
by Soft NMS, sorted in decreasing order of scores
[1]: float tensor with the re-scored scores of the elements that were kept
"""
if boxes.numel() == 0:
return (
torch.empty((0,), dtype=torch.int64, device=boxes.device),
torch.empty((0,), dtype=torch.float32, device=scores.device),
)
# strategy: in order to perform NMS independently per class.
# we add an offset to all the boxes. The offset is dependent
# only on the class idx, and is large enough so that boxes
# from different classes do not overlap
max_coordinate = boxes.max()
offsets = idxs.to(boxes) * (max_coordinate + 1)
boxes_for_nms = boxes + offsets[:, None]
return soft_nms(
boxes_for_nms, scores, method_gaussian=method_gaussian, sigma=sigma,
iou_threshold=iou_threshold, score_threshold=score_threshold
)
|
TensorFlow2/Segmentation/nnUNet/runtime | runtime | logging | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from abc import ABC, abstractmethod
from typing import Callable
import dllogger
from dllogger import Verbosity
from runtime.utils import rank_zero_only
class Logger(ABC):
@rank_zero_only
@abstractmethod
def log_hyperparams(self, params):
pass
@rank_zero_only
@abstractmethod
def log_metadata(self, metric, metadata):
pass
@rank_zero_only
@abstractmethod
def log_metrics(self, metrics, step=None):
pass
@staticmethod
def _sanitize_params(params):
def _sanitize(val):
if isinstance(val, Callable):
try:
_val = val()
if isinstance(_val, Callable):
return val.__name__
return _val
except Exception:
return getattr(val, "__name__", None)
elif isinstance(val, pathlib.Path):
return str(val)
return val
return {key: _sanitize(val) for key, val in params.items()}
@rank_zero_only
def flush(self):
pass
class LoggerCollection(Logger):
def __init__(self, loggers):
super().__init__()
self.loggers = loggers
def __getitem__(self, index):
return [logger for logger in self.loggers][index]
@rank_zero_only
def log_metrics(self, metrics, step=None):
for logger in self.loggers:
logger.log_metrics(metrics, step)
@rank_zero_only
def log_hyperparams(self, params):
for logger in self.loggers:
logger.log_hyperparams(params)
@rank_zero_only
def log_metadata(self, metric, metadata):
for logger in self.loggers:
logger.log_metadata(metric, metadata)
@rank_zero_only
def flush(self):
for logger in self.loggers:
logger.flush()
class DLLogger(Logger):
def __init__(self, save_dir, filename, append, quiet):
super().__init__()
self._initialize_dllogger(save_dir, filename, append, quiet)
@rank_zero_only
def _initialize_dllogger(self, save_dir, filename, append, quiet):
save_dir.mkdir(parents=True, exist_ok=True)
backends = [
dllogger.JSONStreamBackend(Verbosity.DEFAULT, str(save_dir / filename), append=append),
]
if not quiet:
backends.append(dllogger.StdOutBackend(Verbosity.VERBOSE, step_format=lambda step: f"Step: {step} "))
dllogger.init(backends=backends)
@rank_zero_only
def log_hyperparams(self, params):
params = self._sanitize_params(params)
dllogger.log(step="PARAMETER", data=params)
@rank_zero_only
def log_metadata(self, metric, metadata):
dllogger.metadata(metric, metadata)
@rank_zero_only
def log_metrics(self, metrics, step=None):
if step is None:
step = tuple()
dllogger.log(step=step, data=metrics)
@rank_zero_only
def flush(self):
dllogger.flush()
def get_logger(args):
loggers = []
if args.use_dllogger:
loggers.append(
DLLogger(save_dir=args.results, filename=args.logname, append=args.resume_training, quiet=args.quiet)
)
return LoggerCollection(loggers)
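# --- Illustrative usage sketch (not part of the original module) -------------
# Minimal example of driving the DLLogger wrapper directly; the path, filename
# and metric name below are arbitrary. get_logger(args) above is the intended
# entry point and builds the same object from the parsed command-line arguments.
def _example_dllogger_usage():
    logger = DLLogger(
        save_dir=pathlib.Path("/tmp/nnunet_logs"), filename="log.json", append=False, quiet=True
    )
    logger.log_hyperparams({"learning_rate": 3e-4, "batch_size": 2})
    logger.log_metadata("dice", {"unit": None})
    logger.log_metrics({"dice": 0.91}, step=1)
    logger.flush()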
|
PyTorch/Translation/Transformer/scripts | scripts | run_inference | : ${FP16:=0}
[ ${FP16} -ne 0 ] && PREC="--fp16"
sacrebleu -t wmt14/full -l en-de --echo src | \
python inference.py \
--buffer-size 5000 \
--path /checkpoints/transformer_pyt_20.06.pt \
--max-tokens 10240 \
--fuse-dropout-add \
--remove-bpe \
--bpe-codes /checkpoints/bpe_codes \
${PREC} \
| sacrebleu -t wmt14/full -l en-de -lc
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer | maintainer | exceptions | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ContainerNotStarted(Exception):
pass
|
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit/model_analyzer | model_analyzer | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .model_analyzer import ModelAnalyzer, ModelAnalyzerMode, ModelAnalyzerReportMode # noqa: F401
from .model_analyzer_config import ModelAnalyzerConfig # noqa: F401
|
PyTorch/Classification/ConvNets/image_classification | image_classification | training | # Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
from copy import deepcopy
from functools import wraps
from typing import Callable, Dict, Optional, Tuple
import torch
import torch.nn as nn
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel as DDP
from . import logger as log
from . import utils
from .logger import TrainingMetrics, ValidationMetrics
from .models.common import EMA
class Executor:
def __init__(
self,
model: nn.Module,
loss: Optional[nn.Module],
cuda: bool = True,
memory_format: torch.memory_format = torch.contiguous_format,
amp: bool = False,
scaler: Optional[torch.cuda.amp.GradScaler] = None,
divide_loss: int = 1,
ts_script: bool = False,
):
assert not (amp and scaler is None), "Gradient Scaler is needed for AMP"
def xform(m: nn.Module) -> nn.Module:
if cuda:
m = m.cuda()
m.to(memory_format=memory_format)
return m
self.model = xform(model)
if ts_script:
self.model = torch.jit.script(self.model)
self.ts_script = ts_script
self.loss = xform(loss) if loss is not None else None
self.amp = amp
self.scaler = scaler
self.is_distributed = False
self.divide_loss = divide_loss
self._fwd_bwd = None
self._forward = None
def distributed(self, gpu_id):
self.is_distributed = True
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
self.model = DDP(self.model, device_ids=[gpu_id], output_device=gpu_id)
torch.cuda.current_stream().wait_stream(s)
def _fwd_bwd_fn(
self,
input: torch.Tensor,
target: torch.Tensor,
) -> torch.Tensor:
with autocast(enabled=self.amp):
loss = self.loss(self.model(input), target)
loss /= self.divide_loss
self.scaler.scale(loss).backward()
return loss
def _forward_fn(
self, input: torch.Tensor, target: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
with torch.no_grad(), autocast(enabled=self.amp):
output = self.model(input)
loss = None if self.loss is None else self.loss(output, target)
return output if loss is None else loss, output
def optimize(self, fn):
return fn
@property
def forward_backward(self):
if self._fwd_bwd is None:
if self.loss is None:
raise NotImplementedError(
"Loss must not be None for forward+backward step"
)
self._fwd_bwd = self.optimize(self._fwd_bwd_fn)
return self._fwd_bwd
@property
def forward(self):
if self._forward is None:
self._forward = self.optimize(self._forward_fn)
return self._forward
def train(self):
self.model.train()
if self.loss is not None:
self.loss.train()
def eval(self):
self.model.eval()
if self.loss is not None:
self.loss.eval()
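# --- Illustrative usage sketch (not part of the original training code) ------
# Minimal CPU-only forward pass through an Executor; the toy model, loss and
# tensor shapes are made up. The real entry point builds the Executor from the
# command-line arguments and typically runs with cuda=True and AMP enabled.
def _example_executor_forward():
    executor = Executor(
        model=nn.Linear(8, 4),
        loss=nn.CrossEntropyLoss(),
        cuda=False,
        amp=False,
        scaler=None,
    )
    executor.eval()
    images = torch.randn(2, 8)
    labels = torch.tensor([0, 3])
    loss, logits = executor.forward(images, labels)
    return loss, logits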
class Trainer:
def __init__(
self,
executor: Executor,
optimizer: torch.optim.Optimizer,
grad_acc_steps: int,
ema: Optional[float] = None,
):
self.executor = executor
self.optimizer = optimizer
self.grad_acc_steps = grad_acc_steps
self.use_ema = False
if ema is not None:
self.ema_executor = deepcopy(self.executor)
self.ema = EMA(ema, self.ema_executor.model)
self.use_ema = True
self.optimizer.zero_grad(set_to_none=True)
self.steps_since_update = 0
def train(self):
self.executor.train()
if self.use_ema:
self.ema_executor.train()
def eval(self):
self.executor.eval()
if self.use_ema:
self.ema_executor.eval()
def train_step(self, input, target, step=None):
loss = self.executor.forward_backward(input, target)
self.steps_since_update += 1
if self.steps_since_update == self.grad_acc_steps:
if self.executor.scaler is not None:
self.executor.scaler.step(self.optimizer)
self.executor.scaler.update()
else:
self.optimizer.step()
self.optimizer.zero_grad()
self.steps_since_update = 0
torch.cuda.synchronize()
if self.use_ema:
self.ema(self.executor.model, step=step)
return loss
def validation_steps(self) -> Dict[str, Callable]:
vsd: Dict[str, Callable] = {"val": self.executor.forward}
if self.use_ema:
vsd["val_ema"] = self.ema_executor.forward
return vsd
def state_dict(self) -> dict:
res = {
"state_dict": self.executor.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
if self.use_ema:
res["state_dict_ema"] = self.ema_executor.model.state_dict()
return res
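# --- Illustrative usage sketch (not part of the original training code) ------
# Single optimization step with the Trainer on random data. A CUDA device is
# assumed because train_step calls torch.cuda.synchronize(); the toy model,
# optimizer and batch below are example values only.
def _example_trainer_step():
    executor = Executor(
        model=nn.Linear(8, 4),
        loss=nn.CrossEntropyLoss(),
        cuda=True,
        amp=False,
        scaler=torch.cuda.amp.GradScaler(enabled=False),
    )
    optimizer = torch.optim.SGD(executor.model.parameters(), lr=0.1)
    trainer = Trainer(executor, optimizer, grad_acc_steps=1, ema=None)
    trainer.train()
    images = torch.randn(2, 8, device="cuda")
    labels = torch.randint(0, 4, (2,), device="cuda")
    loss = trainer.train_step(images, labels, step=0)
    return loss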
def train(
train_step,
train_loader,
lr_scheduler,
grad_scale_fn,
log_fn,
timeout_handler,
prof=-1,
step=0,
):
interrupted = False
end = time.time()
data_iter = enumerate(train_loader)
for i, (input, target) in data_iter:
bs = input.size(0)
lr = lr_scheduler(i)
data_time = time.time() - end
loss = train_step(input, target, step=step + i)
it_time = time.time() - end
with torch.no_grad():
if torch.distributed.is_initialized():
reduced_loss = utils.reduce_tensor(loss.detach())
else:
reduced_loss = loss.detach()
log_fn(
compute_ips=utils.calc_ips(bs, it_time - data_time),
total_ips=utils.calc_ips(bs, it_time),
data_time=data_time,
compute_time=it_time - data_time,
lr=lr,
loss=reduced_loss.item(),
grad_scale=grad_scale_fn(),
)
end = time.time()
if prof > 0 and (i + 1 >= prof):
time.sleep(5)
break
if ((i + 1) % 20 == 0) and timeout_handler.interrupted:
time.sleep(5)
interrupted = True
break
return interrupted
def validate(infer_fn, val_loader, log_fn, prof=-1, with_loss=True, topk=5):
top1 = log.AverageMeter()
    # the caller is expected to have switched the model to eval mode (see Trainer.eval)
end = time.time()
data_iter = enumerate(val_loader)
for i, (input, target) in data_iter:
bs = input.size(0)
data_time = time.time() - end
if with_loss:
loss, output = infer_fn(input, target)
else:
output = infer_fn(input)
with torch.no_grad():
precs = utils.accuracy(output.data, target, topk=(1, topk))
if torch.distributed.is_initialized():
if with_loss:
reduced_loss = utils.reduce_tensor(loss.detach())
precs = map(utils.reduce_tensor, precs)
else:
if with_loss:
reduced_loss = loss.detach()
precs = map(lambda t: t.item(), precs)
infer_result = {f"top{k}": (p, bs) for k, p in zip((1, topk), precs)}
if with_loss:
infer_result["loss"] = (reduced_loss.item(), bs)
torch.cuda.synchronize()
it_time = time.time() - end
top1.record(infer_result["top1"][0], bs)
log_fn(
compute_ips=utils.calc_ips(bs, it_time - data_time),
total_ips=utils.calc_ips(bs, it_time),
data_time=data_time,
compute_time=it_time - data_time,
**infer_result,
)
end = time.time()
if (prof > 0) and (i + 1 >= prof):
time.sleep(5)
break
return top1.get_val()
# Train loop {{{
def train_loop(
trainer: Trainer,
lr_scheduler,
train_loader,
train_loader_len,
val_loader,
logger,
best_prec1=0,
start_epoch=0,
end_epoch=0,
early_stopping_patience=-1,
prof=-1,
skip_training=False,
skip_validation=False,
save_checkpoints=True,
checkpoint_dir="./",
checkpoint_filename="checkpoint.pth.tar",
keep_last_n_checkpoints=0,
topk=5,
):
checkpointer = utils.Checkpointer(
last_filename=checkpoint_filename,
checkpoint_dir=checkpoint_dir,
keep_last_n=keep_last_n_checkpoints,
)
train_metrics = TrainingMetrics(logger)
val_metrics = {
k: ValidationMetrics(logger, k, topk) for k in trainer.validation_steps().keys()
}
training_step = trainer.train_step
prec1 = -1
if early_stopping_patience > 0:
epochs_since_improvement = 0
print(f"RUNNING EPOCHS FROM {start_epoch} TO {end_epoch}")
with utils.TimeoutHandler() as timeout_handler:
interrupted = False
for epoch in range(start_epoch, end_epoch):
if logger is not None:
logger.start_epoch()
if not skip_training:
if logger is not None:
data_iter = logger.iteration_generator_wrapper(
train_loader, mode="train"
)
else:
data_iter = train_loader
trainer.train()
interrupted = train(
training_step,
data_iter,
lambda i: lr_scheduler(trainer.optimizer, i, epoch),
trainer.executor.scaler.get_scale,
train_metrics.log,
timeout_handler,
prof=prof,
step=epoch * train_loader_len,
)
if not skip_validation:
trainer.eval()
for k, infer_fn in trainer.validation_steps().items():
if logger is not None:
data_iter = logger.iteration_generator_wrapper(
val_loader, mode="val"
)
else:
data_iter = val_loader
step_prec1, _ = validate(
infer_fn,
data_iter,
val_metrics[k].log,
prof=prof,
topk=topk,
)
if k == "val":
prec1 = step_prec1
if prec1 > best_prec1:
is_best = True
best_prec1 = prec1
else:
is_best = False
else:
is_best = False
best_prec1 = 0
if logger is not None:
logger.end_epoch()
if save_checkpoints and (
not torch.distributed.is_initialized()
or torch.distributed.get_rank() == 0
):
checkpoint_state = {
"epoch": epoch + 1,
"best_prec1": best_prec1,
**trainer.state_dict(),
}
checkpointer.save_checkpoint(
checkpoint_state,
is_best,
filename=f"checkpoint_{epoch:04}.pth.tar",
)
if early_stopping_patience > 0:
if not is_best:
epochs_since_improvement += 1
else:
epochs_since_improvement = 0
if epochs_since_improvement >= early_stopping_patience:
break
if interrupted:
break
# }}}
|
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit | deployment_toolkit | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. |
PyTorch/LanguageModeling/BERT/triton/runner/maintainer | maintainer | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .container import Container
from .docker.maintainer import DockerMaintainer
from .maintainer import Maintainer
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/scripts/autobench | autobench | ngc_electricity_HP_search | NGC: &NGC
hostname: ngc
instance: dgx1v.32g.8.norm.beta
job_name: "ml-model.tft electricity HP search"
docker_image: nvcr.io/nvidian/swdl/jbaczek:tft_pyt
datasets:
/data: 78291
workspaces:
/ws: VUMFFB3uSv25FDlkXg80Vw
download_dir: /home/jbaczek/Downloads
jobs:
- steps:
- EPOCHS=30 DATASET=electricity NGPU=8 DROPOUT=0.1 LR=5e-4 H_SIZE=128 N_HEADS=4 bash scripts/run_hp_search.sh
backend: *NGC
- steps:
- EPOCHS=30 DATASET=electricity NGPU=8 DROPOUT=0.1 LR=5e-4 H_SIZE=128 N_HEADS=2 bash scripts/run_hp_search.sh
backend: *NGC
reports:
filename: electricity_hp_search
types:
- xls
|
PyTorch/Detection/Efficientdet/data | data | dataloader_test | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import yaml
import math
import os
from datetime import datetime
import ctypes
import numpy as np
import torch
import torchvision.utils
from effdet.config import get_efficientdet_config
from data import create_loader, CocoDetection
from utils.utils import AverageMeter
from data.loader import IterationBasedBatchSampler
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
def add_bool_arg(parser, name, default=False, help=''): # FIXME move to utils
dest_name = name.replace('-', '_')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=dest_name, action='store_true', help=help)
group.add_argument('--no-' + name, dest=dest_name, action='store_false', help=help)
parser.set_defaults(**{dest_name: default})
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Dataset / Model parameters
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-b', '--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 32)')
parser.add_argument('--input-size', type=int, default=512, metavar='N',
help='input image size (default: 512)')
parser.add_argument('--prefetcher', action='store_true', default=True,
help='enable fast prefetcher')
parser.add_argument('--train-interpolation', type=str, default='random',
help='Training interpolation (random, bilinear, bicubic default: "random")')
parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
                    help='how many data loading workers to use (default: 4)')
parser.add_argument('--pin-mem', action='store_true', default=False,
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument("--local_rank", default=os.getenv('LOCAL_RANK', 0), type=int)
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
def test_number_of_iters_and_elements():
for batch_size in [4]:
for drop_last in [False, True]:
dataset = [i for i in range(10)]
sampler = torch.utils.data.sampler.SequentialSampler(dataset)
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, batch_size, drop_last=drop_last
)
iter_sampler = IterationBasedBatchSampler(
batch_sampler
)
iterator = iter(iter_sampler)
print("Len of sampler {} ".format(len(iter_sampler)))
print("=====================================================")
print("Test batch size {} drop last {}".format(batch_size, drop_last))
steps_per_epoch = int( np.ceil(len(dataset) / batch_size) )
i = 0
for epoch in range(3):
for _ in range(steps_per_epoch):
batch = next(iterator)
start = (i % len(batch_sampler)) * batch_size
end = min(start + batch_size, len(dataset))
expected = [x for x in range(start, end)]
print("Epoch {} iteration {} batch {}".format(epoch, i, batch))
i += 1
def main():
args, args_text = _parse_args()
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.device = 'cuda:0'
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.device = 'cuda:%d' % args.local_rank
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
model_name = 'efficientdet_d0'
data_config = get_efficientdet_config(model_name)
train_anno_set = 'train2017'
train_annotation_path = os.path.join(args.data, 'annotations', f'instances_{train_anno_set}.json')
train_image_dir = train_anno_set
dataset_train = CocoDetection(os.path.join(args.data, train_image_dir), train_annotation_path, data_config)
print("Length of training dataset {}".format(len(dataset_train)))
loader_train = create_loader(
dataset_train,
input_size=args.input_size,
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
#re_prob=args.reprob, # FIXME add back various augmentations
#re_mode=args.remode,
#re_count=args.recount,
#re_split=args.resplit,
#color_jitter=args.color_jitter,
#auto_augment=args.aa,
interpolation=args.train_interpolation,
#mean=data_config['mean'],
#std=data_config['std'],
num_workers=args.workers,
distributed=args.distributed,
#collate_fn=collate_fn,
pin_mem=args.pin_mem,
)
print("Iterations per epoch {}".format(math.ceil( len(dataset_train) / ( args.batch_size * args.world_size ))))
data_time_m = AverageMeter()
end = time.time()
if args.local_rank == 0:
print("Starting to test...")
for batch_idx, (input, target) in enumerate(loader_train):
data_time_m.update(time.time() - end)
if args.local_rank == 0 and batch_idx % 20 == 0:
print("batch time till {} is {}".format(batch_idx, data_time_m.avg))
end = time.time()
if __name__ == "__main__":
main()
#### USAGE ####
#
# NUM_PROC=8
# python -m torch.distributed.launch --nproc_per_node=$NUM_PROC data/dataloader_test.py /workspace/object_detection/datasets/coco -b 64 --workers 16
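#
# A single-process run is also possible (illustrative; paths and values are examples only):
# python data/dataloader_test.py /workspace/object_detection/datasets/coco -b 32 --workers 8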
# |
TensorFlow/Segmentation/UNet_Industrial/scripts | scripts | UNet_FP32_1GPU_XLA | #!/usr/bin/env bash
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches UNet training in FP32 with XLA on 1 GPU with a batch size of 16 (16 per GPU)
# Usage ./UNet_FP32_1GPU_XLA.sh <path to result repository> <path to dataset> <dagm classID (1-10)>
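# Example invocation (the paths below are illustrative placeholders, not shipped defaults):
# ./UNet_FP32_1GPU_XLA.sh /results /data/dagm2007 1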
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export TF_CPP_MIN_LOG_LEVEL=3
python "${BASEDIR}/../main.py" \
--unet_variant='tinyUNet' \
--activation_fn='relu' \
--exec_mode='train_and_evaluate' \
--iter_unit='batch' \
--num_iter=2500 \
--batch_size=16 \
--warmup_step=10 \
--results_dir="${1}" \
--data_dir="${2}" \
--dataset_name='DAGM2007' \
--dataset_classID="${3}" \
--data_format='NCHW' \
--use_auto_loss_scaling \
--nouse_tf_amp \
--use_xla \
--learning_rate=1e-4 \
--learning_rate_decay_factor=0.8 \
--learning_rate_decay_steps=500 \
--rmsprop_decay=0.9 \
--rmsprop_momentum=0.8 \
--loss_fn_name='adaptive_loss' \
--weight_decay=1e-5 \
--weight_init_method='he_uniform' \
--augment_data \
--display_every=250 \
--debug_verbosity=0
|
TensorFlow2/Classification/ConvNets/dataloader | dataloader | dataset_factory | # Lint as: python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset utilities for vision tasks using TFDS and tf.data.Dataset."""
from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function
import os
from typing import Any, List, Optional, Tuple, Mapping, Union
import functools
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow import keras
from dataloader import augment
from dataloader import preprocessing
from dataloader import Dali
import horovod.tensorflow.keras as hvd
import nvidia.dali.plugin.tf as dali_tf
AUGMENTERS = {
'autoaugment': augment.AutoAugment,
'randaugment': augment.RandAugment,
}
def cutmix_mask(alpha, h, w):
"""[summary]
Returns image mask of size wxh for CutMix where the masked region is one
and bakground is zero. To create the mask, we first sample the top-left
corner of the masked region and then determine its width and height by
sampling a scale ratio from the beta distribution parameterized by alpha.
The masked region determined above is painted white and then zero-padded
to have width w and height h.
Args:
alpha ([float]): used to sample a scale ratio
h ([integer]): width of the mask image
w ([integer]): height of the mask image
Returns:
[type]: [description]
"""
if alpha == 0:
return tf.zeros((h,w,1))
r_x = tf.random.uniform([], 0, w, tf.int32)
r_y = tf.random.uniform([], 0, h, tf.int32)
area = tf.compat.v1.distributions.Beta(alpha, alpha).sample()
patch_ratio = tf.cast(tf.math.sqrt(1 - area), tf.float32)
r_w = tf.cast(patch_ratio * tf.cast(w, tf.float32), tf.int32)
r_h = tf.cast(patch_ratio * tf.cast(h, tf.float32), tf.int32)
bbx1 = tf.clip_by_value(tf.cast(r_x - r_w // 2, tf.int32), 0, w)
bby1 = tf.clip_by_value(tf.cast(r_y - r_h // 2, tf.int32), 0, h)
bbx2 = tf.clip_by_value(tf.cast(r_x + r_w // 2, tf.int32), 0, w)
bby2 = tf.clip_by_value(tf.cast(r_y + r_h // 2, tf.int32), 0, h)
# Create the binary mask.
pad_left = bbx1
pad_top = bby1
pad_right = tf.maximum(w - bbx2, 0)
pad_bottom = tf.maximum(h - bby2, 0)
r_h = bby2 - bby1
r_w = bbx2 - bbx1
mask = tf.pad(
tf.ones((r_h, r_w)),
paddings=[[pad_top, pad_bottom], [pad_left, pad_right]],
mode='CONSTANT',
constant_values=0)
mask.set_shape((h, w))
return mask[..., None] # Add channel dim.
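# Usage sketch for cutmix_mask (illustrative; alpha and the 224x224 shape are example
# values, the pipeline below passes the configured image size instead):
#   mask = cutmix_mask(alpha=1.0, h=224, w=224)  # float tensor of shape (224, 224, 1)
#   # entries are 1 inside the sampled patch and 0 elsewhere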
def mixup(batch_size, alpha, images, labels, defer_img_mixing):
"""Applies Mixup regularization to a batch of images and labels.
[1] Hongyi Zhang, Moustapha Cisse, Yann N. Dauphin, David Lopez-Paz
Mixup: Beyond Empirical Risk Minimization.
ICLR'18, https://arxiv.org/abs/1710.09412
Arguments:
batch_size: The input batch size for images and labels.
alpha: Float that controls the strength of Mixup regularization.
images: A batch of images of shape [batch_size, ...]
labels: A batch of labels of shape [batch_size, num_classes]
defer_img_mixing: If true, labels are mixed in this function but image
mixing is postponed until the data arrives on the compute device. This
can accelerate the data pipeline. Note that it is the user's responsibility
to implement image mixing in the module that defines the forward pass of the
network. To ensure that the subsequent on-device image mixing is consistent
with label mixing performed here, this function returns the mixing weights
as well.
Returns:
A tuple of ((images, mix_weights), labels) with the same dimensions as the input with
Mixup regularization applied.
"""
if alpha == 0.0:
    # returning 1s as mixing weights means no mixup
return (images, tf.ones((batch_size,1,1,1))), labels
mix_weight = tf.compat.v1.distributions.Beta(alpha, alpha).sample([batch_size, 1])
mix_weight = tf.maximum(mix_weight, 1. - mix_weight)
img_weight = tf.cast(tf.reshape(mix_weight, [batch_size, 1, 1, 1]), images.dtype)
labels_weight = tf.cast(mix_weight, labels.dtype)
# Mixup: taking a weighted sum with the same batch in reverse.
labels_mix = labels * labels_weight + labels[::-1] * (1. - labels_weight)
if not defer_img_mixing:
images_mix = images * img_weight + images[::-1] * (1. - img_weight)
else:
# postpone image mixing
images_mix = images
return (images_mix, img_weight), labels_mix
def cutmix(images, labels, masks, defer_img_mixing):
"""[summary]
Applies CutMix regularization to a batch of images and labels.
Reference: https://arxiv.org/pdf/1905.04899.pdf
Args:
images: a Tensor of batched images
labels: a Tensor of batched labels.
masks: a Tensor of batched masks.
defer_img_mixing: If true, labels are mixed in this function but image
mixing is postponed until the data arrives on the compute device. This
can accelerate the data pipeline. Note that it is the user's responsibility
to implement image mixing in the module that defines the forward pass of the
network. To ensure that the subsequent on-device image mixing is consistent
with label mixing performed here, this function returns the mixing masks
as well.
Returns:
A tuple of ((images, mix_masks), labels)
"""
mix_area = tf.reduce_sum(masks) / tf.cast(tf.size(masks), masks.dtype)
mix_area = tf.cast(mix_area, labels.dtype)
mixed_label = (1. - mix_area) * labels + mix_area * labels[::-1]
masks = tf.cast(masks, images.dtype)
if not defer_img_mixing:
mixed_images = (1. - masks) * images + masks * images[::-1]
else:
# postpone image mixing
mixed_images = images
return (mixed_images, masks), mixed_label
def mixing(batch_size, mixup_alpha, cutmix_alpha, defer_img_mixing, features, labels):
"""Applies mixing regularization to a batch of images and labels. If both
mixup and cutmix requested, the batch is halved followed by applying
mixup on one half and cutmix on the other half.
Arguments:
batch_size: The input batch size for images and labels.
mixup_alpha: Float that controls the strength of Mixup regularization.
    cutmix_alpha: Float that controls the strength of CutMix regularization.
defer_img_mixing: If true, the image mixing ops will be postponed.
labels: a dict of batched labels.
Returns:
A new dict of features with updated images and labels with the same
dimensions as the input.
"""
image = features['image']
label = labels['label']
mix_masks = features['cutmix_mask']
if mixup_alpha and cutmix_alpha:
# split the batch half-half, and apply mixup and cutmix for each half.
bs = batch_size // 2
(img1, mix_weights), lab1 = mixup(bs, mixup_alpha, image[:bs], label[:bs], defer_img_mixing)
(img2, mix_masks), lab2 = cutmix(image[bs:], label[bs:], mix_masks[bs:], defer_img_mixing)
image = tf.concat([img1, img2], axis=0)
label = tf.concat([lab1, lab2], axis=0)
elif mixup_alpha:
# only mixup
(image, mix_weights), label = mixup(batch_size, mixup_alpha, image, label, defer_img_mixing)
# mix_masks = tf.zeros_like(mix_masks) -> mix_masks is already all 0s (see cutmix fn)
elif cutmix_alpha:
# only cutmix
(image, mix_masks), label = cutmix(image, label, mix_masks, defer_img_mixing)
mix_weights = tf.ones((batch_size,1,1,1)) # 1s mean no mixup
else:
# mix_masks = tf.zeros_like(mix_masks) -> mix_masks is already all 0s (see cutmix fn)
mix_weights = tf.ones((batch_size,1,1,1)) # 1s mean no mixup
features['image'] = image
features['mixup_weight'] = mix_weights
features['cutmix_mask'] = mix_masks
return features, label
def mixing_lite(images, mixup_weights, cutmix_masks, batch_size, do_mixup, do_cutmix):
"""[summary]
This function, which is a simplified version of the mixing function (see above),
will be called in the model module when the user wishes to perform image mixing
on-device (defer_image_mixing=True).
Note: the logic here must be identical to that of the mixing fn above.
Args:
images: a Tensor of batched images.
mixup_weights: a Tensor of batched mixup weights.
cutmix_masks: a Tensor of batched cutmix masks.
batch_size: static batch size.
do_mixup: boolean, to determine if mixup is needed
do_cutmix: boolean, to determine if cutmix is needed
Returns:
a Tensor of batched MIXED images
"""
if do_mixup and do_cutmix:
# split the batch half-half, and apply mixup and cutmix for each half.
bs = batch_size // 2
images_mixup = images[:bs] * mixup_weights + images[:bs][::-1] * (1. - mixup_weights)
    images_cutmix = images[bs:] * (1. - cutmix_masks) + images[bs:][::-1] * cutmix_masks
# concat order must be consistent with mixing fn
return tf.concat([images_mixup, images_cutmix], axis=0)
elif do_mixup:
return images * mixup_weights + images[::-1] * (1. - mixup_weights)
elif do_cutmix:
return images * (1. - cutmix_masks) + images[::-1] * cutmix_masks
else:
return images
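# Deferred-mixing usage sketch (illustrative; `features` and the flag expressions are
# assumptions about the model module, not part of this file). With defer_img_mixing=True
# the pipeline below only mixes labels and forwards the weights/masks, so the model's
# forward pass is expected to finish the job roughly like this:
#   mixed_images = mixing_lite(features['image'],
#                              features['mixup_weight'],
#                              features['cutmix_mask'],
#                              batch_size=local_batch_size,
#                              do_mixup=mixup_alpha > 0,
#                              do_cutmix=cutmix_alpha > 0)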
class Dataset:
"""An object for building datasets.
Allows building various pipelines fetching examples, preprocessing, etc.
Maintains additional state information calculated from the dataset, i.e.,
training set split, batch size, and number of steps (batches).
"""
def __init__(self,
data_dir,
index_file_dir,
split='train',
num_classes=None,
image_size=224,
num_channels=3,
batch_size=128,
dtype='float32',
one_hot=False,
use_dali=False,
augmenter=None,
shuffle_buffer_size=10000,
file_shuffle_buffer_size=1024,
cache=False,
mean_subtract=False,
standardize=False,
augmenter_params=None,
cutmix_alpha=0.0,
mixup_alpha=0.0,
defer_img_mixing=True,
hvd_size=None,
disable_map_parallelization=False
):
"""Initialize the builder from the config."""
if not os.path.exists(data_dir):
raise FileNotFoundError('Cannot find data dir: {}'.format(data_dir))
if one_hot and num_classes is None:
      raise ValueError('Number of classes is required for one_hot')
self._data_dir = data_dir
self._split = split
self._image_size = image_size
self._num_classes = num_classes
self._num_channels = num_channels
self._batch_size = batch_size
self._dtype = dtype
self._one_hot = one_hot
self._augmenter_name = augmenter
self._shuffle_buffer_size = shuffle_buffer_size
self._file_shuffle_buffer_size = file_shuffle_buffer_size
self._cache = cache
self._mean_subtract = mean_subtract
self._standardize = standardize
self._index_file = index_file_dir
self._use_dali = use_dali
self.mixup_alpha = mixup_alpha
self.cutmix_alpha = cutmix_alpha
self.defer_img_mixing = defer_img_mixing
self.disable_map_parallelization = disable_map_parallelization
self._num_gpus = hvd.size() if not hvd_size else hvd_size
if self._augmenter_name is not None:
augmenter = AUGMENTERS.get(self._augmenter_name, None)
params = augmenter_params or {}
self._augmenter = augmenter(**params) if augmenter is not None else None
else:
self._augmenter = None
@property
def is_training(self) -> bool:
"""Whether this is the training set."""
return self._split == 'train'
@property
def global_batch_size(self) -> int:
"""The batch size, multiplied by the number of replicas (if configured)."""
return self._batch_size * self._num_gpus
@property
def local_batch_size(self):
"""The base unscaled batch size."""
return self._batch_size
@property
def dtype(self) -> tf.dtypes.DType:
"""Converts the config's dtype string to a tf dtype.
Returns:
A mapping from string representation of a dtype to the `tf.dtypes.DType`.
Raises:
ValueError if the config's dtype is not supported.
"""
dtype_map = {
'float32': tf.float32,
'bfloat16': tf.bfloat16,
'float16': tf.float16,
'fp32': tf.float32,
'bf16': tf.bfloat16,
}
try:
return dtype_map[self._dtype]
    except KeyError:
      raise ValueError('Invalid DType provided: {}. Supported types: {}'.format(
          self._dtype, dtype_map.keys()))
@property
def image_size(self) -> int:
"""The size of each image (can be inferred from the dataset)."""
return int(self._image_size)
@property
def num_channels(self) -> int:
"""The number of image channels (can be inferred from the dataset)."""
return int(self._num_channels)
@property
def num_classes(self) -> int:
"""The number of classes (can be inferred from the dataset)."""
return int(self._num_classes)
@property
def num_steps(self) -> int:
"""The number of classes (can be inferred from the dataset)."""
return int(self._num_steps)
def set_shapes(self, batch_size, features, labels):
"""Statically set the batch_size dimension."""
features['image'].set_shape(features['image'].get_shape().merge_with(
tf.TensorShape([batch_size, None, None, None])))
labels['label'].set_shape(labels['label'].get_shape().merge_with(
tf.TensorShape([batch_size, None])))
return features, labels
def build(self) -> tf.data.Dataset:
"""Construct a dataset end-to-end and return it.
Args:
input_context: An optional context provided by `tf.distribute` for
cross-replica training.
Returns:
A TensorFlow dataset outputting batched images and labels.
"""
if self._use_dali:
print("Using dali for {train} dataloading".format(train = "training" if self.is_training else "validation"))
tfrec_filenames = sorted(tf.io.gfile.glob(os.path.join(self._data_dir, '%s-*' % self._split)))
tfrec_idx_filenames = sorted(tf.io.gfile.glob(os.path.join(self._index_file, '%s-*' % self._split)))
# # Create pipeline
dali_pipeline = Dali.DaliPipeline(tfrec_filenames=tfrec_filenames,
tfrec_idx_filenames=tfrec_idx_filenames,
height=self._image_size,
width=self._image_size,
batch_size=self.local_batch_size,
num_threads=1,
device_id=hvd.local_rank(),
shard_id=hvd.rank(),
num_gpus=hvd.size(),
num_classes=self.num_classes,
deterministic=False,
dali_cpu=False,
training=self.is_training)
# Define shapes and types of the outputs
shapes = (
(self.local_batch_size, self._image_size, self._image_size, 3),
(self.local_batch_size, self._num_classes))
dtypes = (
tf.float32,
tf.float32)
# Create dataset
dataset = dali_tf.DALIDataset(
pipeline=dali_pipeline,
batch_size=self.local_batch_size,
output_shapes=shapes,
output_dtypes=dtypes,
device_id=hvd.local_rank())
# if self.is_training and self._augmenter:
# print('Augmenting with {}'.format(self._augmenter))
# dataset.unbatch().map(self.augment_pipeline, num_parallel_calls=tf.data.experimental.AUTOTUNE).batch(self.local_batch_size)
return dataset
else:
print("Using tf native pipeline for {train} dataloading".format(train = "training" if self.is_training else "validation"))
dataset = self.load_records()
dataset = self.pipeline(dataset)
return dataset
# def augment_pipeline(self, image, label) -> Tuple[tf.Tensor, tf.Tensor]:
# image = self._augmenter.distort(image)
# return image, label
def load_records(self) -> tf.data.Dataset:
"""Return a dataset loading files with TFRecords."""
if self._data_dir is None:
raise ValueError('Dataset must specify a path for the data files.')
file_pattern = os.path.join(self._data_dir,
'{}*'.format(self._split))
dataset = tf.data.Dataset.list_files(file_pattern, shuffle=False)
return dataset
def pipeline(self, dataset: tf.data.Dataset) -> tf.data.Dataset:
"""Build a pipeline fetching, shuffling, and preprocessing the dataset.
Args:
dataset: A `tf.data.Dataset` that loads raw files.
Returns:
A TensorFlow dataset outputting batched images and labels.
"""
# This can help resolve OOM issues when using only 1 GPU for training
options = tf.data.Options()
options.experimental_optimization.map_parallelization = (not self.disable_map_parallelization)
dataset = dataset.with_options(options)
if self._num_gpus > 1:
# For multi-host training, we want each hosts to always process the same
# subset of files. Each host only sees a subset of the entire dataset,
# allowing us to cache larger datasets in memory.
dataset = dataset.shard(self._num_gpus, hvd.rank())
if self.is_training:
# Shuffle the input files.
      dataset = dataset.shuffle(buffer_size=self._file_shuffle_buffer_size)
if self.is_training and not self._cache:
dataset = dataset.repeat()
# Read the data from disk in parallel
dataset = dataset.interleave(
tf.data.TFRecordDataset,
cycle_length=10,
block_length=1,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if self._cache:
dataset = dataset.cache()
if self.is_training:
dataset = dataset.shuffle(self._shuffle_buffer_size)
dataset = dataset.repeat()
# Parse, pre-process, and batch the data in parallel
preprocess = self.parse_record
dataset = dataset.map(preprocess,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
if self._num_gpus > 1:
# The batch size of the dataset will be multiplied by the number of
# replicas automatically when strategy.distribute_datasets_from_function
# is called, so we use local batch size here.
dataset = dataset.batch(self.local_batch_size,
drop_remainder=self.is_training)
else:
dataset = dataset.batch(self.global_batch_size,
drop_remainder=self.is_training)
# apply Mixup/CutMix only during training, if requested in the data pipeline,
# otherwise they will be applied in the model module on device
mixup_alpha = self.mixup_alpha if self.is_training else 0.0
cutmix_alpha = self.cutmix_alpha if self.is_training else 0.0
dataset = dataset.map(
functools.partial(mixing, self.local_batch_size, mixup_alpha, cutmix_alpha, self.defer_img_mixing),
num_parallel_calls=64)
# Assign static batch size dimension
# dataset = dataset.map(
# functools.partial(self.set_shapes, batch_size),
# num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Prefetch overlaps in-feed with training
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def parse_record(self, record: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
"""Parse an ImageNet record from a serialized string Tensor."""
keys_to_features = {
'image/encoded':
tf.io.FixedLenFeature((), tf.string, ''),
'image/format':
tf.io.FixedLenFeature((), tf.string, 'jpeg'),
'image/class/label':
tf.io.FixedLenFeature([], tf.int64, -1),
'image/class/text':
tf.io.FixedLenFeature([], tf.string, ''),
'image/object/bbox/xmin':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymin':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/xmax':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/bbox/ymax':
tf.io.VarLenFeature(dtype=tf.float32),
'image/object/class/label':
tf.io.VarLenFeature(dtype=tf.int64),
}
parsed = tf.io.parse_single_example(record, keys_to_features)
label = tf.reshape(parsed['image/class/label'], shape=[1])
label = tf.cast(label, dtype=tf.int32)
# Subtract one so that labels are in [0, 1000)
label -= 1
image_bytes = tf.reshape(parsed['image/encoded'], shape=[])
image, label = self.preprocess(image_bytes, label)
# populate features and labels dict
features = dict()
labels = dict()
features['image'] = image
features['is_tr_split'] = self.is_training
if self.cutmix_alpha:
features['cutmix_mask'] = cutmix_mask(self.cutmix_alpha, self._image_size, self._image_size)
else:
features['cutmix_mask'] = tf.zeros((self._image_size, self._image_size,1))
labels['label'] = label
return features, labels
def preprocess(self, image: tf.Tensor, label: tf.Tensor
) -> Tuple[tf.Tensor, tf.Tensor]:
"""Apply image preprocessing and augmentation to the image and label."""
if self.is_training:
image = preprocessing.preprocess_for_train(
image,
image_size=self._image_size,
mean_subtract=self._mean_subtract,
standardize=self._standardize,
dtype=self.dtype,
augmenter=self._augmenter)
else:
image = preprocessing.preprocess_for_eval(
image,
image_size=self._image_size,
num_channels=self._num_channels,
mean_subtract=self._mean_subtract,
standardize=self._standardize,
dtype=self.dtype)
label = tf.cast(label, tf.int32)
if self._one_hot:
label = tf.one_hot(label, self.num_classes)
label = tf.reshape(label, [self.num_classes])
return image, label
# @classmethod
# def from_params(cls, *args, **kwargs):
# """Construct a dataset builder from a default config and any overrides."""
# config = DatasetConfig.from_args(*args, **kwargs)
# return cls(config)
|
PyTorch/Forecasting/TFT | TFT | train | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import os
import pickle
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.utils.data import DataLoader, DistributedSampler, RandomSampler
from apex.optimizers import FusedAdam
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda import amp
import numpy as np
import dllogger
from modeling import TemporalFusionTransformer
from configuration import CONFIGS
from data_utils import load_dataset
from log_helper import setup_logger
from criterions import QuantileLoss
from inference import predict
from utils import PerformanceMeter, print_once
import gpu_affinity
from ema import ModelEma
def main(args):
### INIT DISTRIBUTED
args.distributed_world_size = int(os.environ.get('WORLD_SIZE', 1))
args.local_rank = int(os.environ.get('LOCAL_RANK', 0))
if args.distributed_world_size > 1:
dist.init_process_group(backend='nccl', init_method='env://')
print_once(f'Distributed training with {args.distributed_world_size} GPUs')
args.distributed_rank = dist.get_rank()
torch.cuda.set_device(args.local_rank)
torch.cuda.synchronize()
    nproc_per_node = torch.cuda.device_count()
if args.affinity != 'disabled':
affinity = gpu_affinity.set_affinity(
args.local_rank,
nproc_per_node,
args.affinity
)
print(f'{args.local_rank}: thread affinity: {affinity}')
    # Enable CuDNN autotuner
    torch.backends.cudnn.benchmark = True
if args.seed:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
setup_logger(args)
config = CONFIGS[args.dataset]()
if args.overwrite_config:
config.__dict__.update(json.loads(args.overwrite_config))
dllogger.log(step='HPARAMS', data={**vars(args), **vars(config)}, verbosity=1)
train_loader, valid_loader, test_loader = load_dataset(args, config)
model = TemporalFusionTransformer(config).cuda()
if args.ema_decay:
model_ema = ModelEma(model, decay=args.ema_decay)
# Run dummy iteration to initialize lazy modules
dummy_batch = next(iter(train_loader))
dummy_batch = {key: tensor.cuda() if tensor.numel() else None for key, tensor in dummy_batch.items()}
model(dummy_batch)
criterion = QuantileLoss(config).cuda()
optimizer = FusedAdam(model.parameters(), lr=args.lr)
if args.distributed_world_size > 1:
model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
print_once('Model params: {}'.format(sum(p.numel() for p in model.parameters())))
global_step = 0
perf_meter = PerformanceMeter(benchmark_mode=not args.disable_benchmark)
if args.use_amp:
scaler = amp.GradScaler(init_scale=32768.0)
for epoch in range(args.epochs):
start = time.time()
dllogger.log(step=global_step, data={'epoch': epoch}, verbosity=1)
model.train()
for local_step, batch in enumerate(train_loader):
perf_meter.reset_current_lap()
batch = {key: tensor.cuda() if tensor.numel() else None for key, tensor in batch.items()}
with torch.jit.fuser("fuser2"), amp.autocast(enabled=args.use_amp):
predictions = model(batch)
targets = batch['target'][:,config.encoder_length:,:]
p_losses = criterion(predictions, targets)
loss = p_losses.sum()
if global_step == 0 and args.ema_decay:
model_ema(batch)
if args.use_amp:
scaler.scale(loss).backward()
else:
loss.backward()
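            # With --grad_accumulation 0 (the default) the optimizer steps every iteration;
            # otherwise gradients accumulate and a step is taken every grad_accumulation-th global step.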
if not args.grad_accumulation or (global_step+1) % args.grad_accumulation == 0:
if args.use_amp:
scaler.unscale_(optimizer)
if args.clip_grad:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
if args.use_amp:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
optimizer.zero_grad()
if args.ema_decay:
model_ema.update(model)
if args.distributed_world_size > 1:
dist.all_reduce(p_losses)
p_losses /= args.distributed_world_size
loss = p_losses.sum()
torch.cuda.synchronize()
ips = perf_meter.update(args.batch_size * args.distributed_world_size,
exclude_from_total=local_step in [0, 1, 2, len(train_loader)-1])
log_dict = {'P10':p_losses[0].item(), 'P50':p_losses[1].item(), 'P90':p_losses[2].item(), 'loss': loss.item(), 'items/s':ips}
dllogger.log(step=global_step, data=log_dict, verbosity=1)
global_step += 1
validate(args, config, model_ema if args.ema_decay else model, criterion, valid_loader, global_step)
if validate.early_stop_c >= args.early_stopping:
print_once('Early stopping')
break
### TEST PHASE ###
state_dict = torch.load(os.path.join(args.results, 'checkpoint.pt'), map_location='cpu')
if isinstance(model, DDP):
model.module.load_state_dict(state_dict['model'])
else:
model.load_state_dict(state_dict['model'])
model.cuda().eval()
tgt_scalers = pickle.load(open(os.path.join(args.data_path, 'tgt_scalers.bin'), 'rb'))
cat_encodings = pickle.load(open(os.path.join(args.data_path,'cat_encodings.bin'), 'rb'))
unscaled_predictions, unscaled_targets, _, _ = predict(args, config, model, test_loader, tgt_scalers, cat_encodings)
unscaled_predictions = torch.from_numpy(unscaled_predictions).contiguous()
unscaled_targets = torch.from_numpy(unscaled_targets).contiguous()
losses = QuantileLoss(config)(unscaled_predictions, unscaled_targets)
normalizer = unscaled_targets.abs().mean()
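    # Normalized quantile risk (q-risk): twice the quantile loss divided by the mean absolute target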
quantiles = 2 * losses / normalizer
if args.distributed_world_size > 1:
quantiles = quantiles.cuda()
dist.all_reduce(quantiles)
quantiles /= args.distributed_world_size
quantiles = {'test_p10': quantiles[0].item(), 'test_p50': quantiles[1].item(), 'test_p90': quantiles[2].item(), 'sum':sum(quantiles).item()}
finish_log = {**quantiles, 'average_ips':perf_meter.avg, 'convergence_step':validate.conv_step}
dllogger.log(step=(), data=finish_log, verbosity=1)
def validate(args, config, model, criterion, dataloader, global_step):
if not hasattr(validate, 'best_valid_loss'):
validate.best_valid_loss = float('inf')
if not hasattr(validate, 'early_stop_c'):
validate.early_stop_c = 0
model.eval()
losses = []
torch.cuda.synchronize()
validation_start = time.time()
for batch in dataloader:
with torch.jit.fuser("fuser2"), amp.autocast(enabled=args.use_amp), torch.no_grad():
batch = {key: tensor.cuda() if tensor.numel() else None for key, tensor in batch.items()}
predictions = model(batch)
targets = batch['target'][:,config.encoder_length:,:]
p_losses = criterion(predictions, targets)
bs = next(t for t in batch.values() if t is not None).shape[0]
losses.append((p_losses, bs))
torch.cuda.synchronize()
validation_end = time.time()
    p_losses = sum([l[0]*l[1] for l in losses])/sum([l[1] for l in losses])  # takes into account that the last batch is not full
if args.distributed_world_size > 1:
dist.all_reduce(p_losses)
p_losses = p_losses/args.distributed_world_size
ips = len(dataloader.dataset) / (validation_end - validation_start)
log_dict = {'P10':p_losses[0].item(), 'P50':p_losses[1].item(), 'P90':p_losses[2].item(), 'loss': p_losses.sum().item(), 'items/s':ips}
if log_dict['loss'] < validate.best_valid_loss:
validate.best_valid_loss = log_dict['loss']
validate.early_stop_c = 0
validate.conv_step = global_step
if not dist.is_initialized() or dist.get_rank() == 0:
state_dict = model.module.state_dict() if isinstance(model, (DDP, ModelEma)) else model.state_dict()
ckpt = {'args':args, 'config':config, 'model':state_dict}
torch.save(ckpt, os.path.join(args.results, 'checkpoint.pt'))
if args.distributed_world_size > 1:
dist.barrier()
else:
validate.early_stop_c += 1
log_dict = {'val_'+k:v for k,v in log_dict.items()}
dllogger.log(step=global_step, data=log_dict, verbosity=1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, required=True,
help='Path to the dataset')
parser.add_argument('--dataset', type=str, required=True, choices=CONFIGS.keys(),
help='Dataset name')
parser.add_argument('--epochs', type=int, default=25,
help='Default number of training epochs')
parser.add_argument('--sample_data', type=lambda x: int(float(x)), nargs=2, default=[-1, -1],
help="""Subsample the dataset. Specify number of training and valid examples.
Values can be provided in scientific notation. Floats will be truncated.""")
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--use_amp', action='store_true', help='Enable automatic mixed precision')
parser.add_argument('--clip_grad', type=float, default=0.0)
parser.add_argument('--grad_accumulation', type=int, default=0)
parser.add_argument('--early_stopping', type=int, default=1000,
help='Stop training if validation loss does not improve for more than this number of epochs.')
parser.add_argument('--results', type=str, default='/results',
help='Directory in which results are stored')
parser.add_argument('--log_file', type=str, default='dllogger.json',
help='Name of dllogger output file')
parser.add_argument('--overwrite_config', type=str, default='',
help='JSON string used to overload config')
parser.add_argument('--affinity', type=str,
default='socket_unique_interleaved',
choices=['socket', 'single', 'single_unique',
'socket_unique_interleaved',
'socket_unique_continuous',
'disabled'],
help='type of CPU affinity')
parser.add_argument("--ema_decay", type=float, default=0.0, help='Use exponential moving average')
parser.add_argument("--disable_benchmark", action='store_true', help='Disable benchmarking mode')
ARGS = parser.parse_args()
main(ARGS)
|
CUDA-Optimized/FastSpeech/waveglow | waveglow | model | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from torch.autograd import Variable
import torch.nn.functional as F
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution, and the log determinant
of its weight matrix. If reverse=True it does convolution with
inverse
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
self.conv.weight.data = W
def forward(self, z, reverse=False):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
if reverse:
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor' or z.type() == 'torch.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W.unsqueeze(0).float()).squeeze()
z = self.conv(z)
return z, log_det_W
class WN(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary
difference from WaveNet is the convolutions need not be causal. There is
also no dilation size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,
kernel_size):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.cond_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels, 1)
cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
self.cond_layers.append(cond_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(
res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
for i in range(self.n_layers):
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
self.cond_layers[i](spect),
torch.IntTensor([self.n_channels]))
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = res_skip_acts[:, :self.n_channels, :] + audio
skip_acts = res_skip_acts[:, self.n_channels:, :]
else:
skip_acts = res_skip_acts
if i == 0:
output = skip_acts
else:
output = skip_acts + output
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,
n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,
n_mel_channels,
1024, stride=256)
assert(n_group % 2 == 0)
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group / 2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size / 2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels * n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert(spect.size(2) >= audio.size(1))
if spect.size(2) > audio.size(1):
spect = spect[:, :, :audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:, :self.n_early_size, :])
audio = audio[:, self.n_early_size:, :]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s) * audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1], 1)
output_audio.append(audio)
return torch.cat(output_audio, 1), log_s_list, log_det_W_list
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = torch.randn(spect.size(0),
self.n_remaining_channels,
spect.size(2), device=spect.device).to(spect.dtype)
audio = torch.autograd.Variable(sigma * audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k](audio, reverse=True)
if k % self.n_early_every == 0 and k > 0:
z = torch.randn(spect.size(0), self.n_early_size, spect.size(
2), device=spect.device).to(spect.dtype)
audio = torch.cat((sigma * z, audio), 1)
audio = audio.permute(
0, 2, 1).contiguous().view(
audio.size(0), -1).data
return audio
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layers = remove(WN.cond_layers)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
|
TensorFlow/Translation/GNMT/scripts | scripts | parse_log | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import re
import sys
import json
from pathlib import Path
from subprocess import Popen, PIPE
parser = argparse.ArgumentParser(description='Parse training logs')
parser.add_argument('log', help='path to log file', type=Path)
args = parser.parse_args()
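# Illustrative invocation (the log path is an example, not a shipped artifact):
#   python scripts/parse_log.py /results/gnmt_train.log > metrics.json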
content = args.log.read_bytes()
bleu = list(map(lambda x: float(x[0]), re.findall(rb'\nbleu is ((\d|.)+)', content)))
training_speed = re.findall(rb'\ntraining time for epoch (\d+): ((\d|.)+) mins \(((\d|.)+) sent/sec, ((\d|.)+) tokens/sec\)', content)
training_tokens = list(map(lambda x: float(x[5]), training_speed))
training_sentences = list(map(lambda x: float(x[3]), training_speed))
eval_speed = re.findall(rb'\neval time for epoch (\d+): ((\d|.)+) mins \(((\d|.)+) sent/sec, ((\d|.)+) tokens/sec\)', content)
if not eval_speed:
eval_speed = re.findall(rb'\neval time for ckpt(): ((\d|.)+) mins \(((\d|.)+) sent/sec, ((\d|.)+) tokens/sec\)', content)
eval_tokens = list(map(lambda x: float(x[5]), eval_speed))
eval_sentences = list(map(lambda x: float(x[3]), eval_speed))
experiment_duration = float(re.findall(rb'\nExperiment took ((\d|.)+) min', content)[0][0])
ret = {}
ret['bleu'] = bleu
ret['training_tokens_per_sec'] = training_tokens
ret['training_sentences_per_sec'] = training_sentences
ret['eval_tokens_per_sec'] = eval_tokens
ret['eval_sentences_per_sec'] = eval_sentences
ret['duration'] = experiment_duration
print(json.dumps(ret))
|
PyTorch/Recommendation/DLRM/preproc | preproc | DGX-2_config | #!/bin/bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Environment variables for running the Spark preprocessing job.
# Modify the variables below: the numbers should be adjusted according to the
# resources of your running environment.
# set the total number of CPU cores, spark can use
export TOTAL_CORES=80
# set the number of executors
export NUM_EXECUTORS=16
# the cores for each executor, it'll be calculated
export NUM_EXECUTOR_CORES=$((${TOTAL_CORES}/${NUM_EXECUTORS}))
# unit: GB, set the max memory you want to use
export TOTAL_MEMORY=800
# unit: GB, set the memory for driver
export DRIVER_MEMORY=32
# the memory per executor
export EXECUTOR_MEMORY=$(((${TOTAL_MEMORY}-${DRIVER_MEMORY})/${NUM_EXECUTORS}-16))
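# With the defaults above: NUM_EXECUTOR_CORES = 80 / 16 = 5 cores per executor and
# EXECUTOR_MEMORY = (800 - 32) / 16 - 16 = 32 GB per executor.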
|
TensorFlow/Segmentation/UNet_Industrial/datasets | datasets | dagm2007 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright (c) Jonathan Dekhtiar - contact@jonathandekhtiar.eu
# All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
# ==============================================================================
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import glob
import tensorflow as tf
import horovod.tensorflow as hvd
from datasets.core import BaseDataset
from utils import hvd_utils
from dllogger import Logger
__all__ = ['DAGM2007_Dataset']
class DAGM2007_Dataset(BaseDataset):
dataset_name = "DAGM2007"
def __init__(self, data_dir, class_id):
if class_id is None:
raise ValueError("The parameter `class_id` cannot be set to None")
data_dir = os.path.join(data_dir, "raw_images/private/Class%d" % class_id)
super(DAGM2007_Dataset, self).__init__(data_dir)
def _get_data_dirs(self, training):
if training:
csv_file = os.path.join(self.data_dir, "train_list.csv")
image_dir = os.path.join(self.data_dir, "Train")
else:
csv_file = os.path.join(self.data_dir, "test_list.csv")
image_dir = os.path.join(self.data_dir, "Test")
return image_dir, csv_file
def get_dataset_runtime_specs(self, training, iter_unit, num_iter, global_batch_size):
image_dir, _ = self._get_data_dirs(training=training)
filenames = glob.glob(os.path.join(image_dir, "*.PNG"))
num_samples = len(filenames)
num_steps, num_epochs = DAGM2007_Dataset._count_steps(
iter_unit=iter_unit, num_samples=num_samples, num_iter=num_iter, global_batch_size=global_batch_size
)
return filenames, num_samples, num_steps, num_epochs
def dataset_fn(
self,
batch_size,
training,
input_shape,
mask_shape,
num_threads,
use_gpu_prefetch,
normalize_data_method,
only_defective_images,
augment_data,
seed=None
):
super(DAGM2007_Dataset, self).dataset_fn(
batch_size=batch_size,
training=training,
input_shape=input_shape,
mask_shape=mask_shape,
num_threads=num_threads,
use_gpu_prefetch=use_gpu_prefetch,
normalize_data_method=normalize_data_method, # [None, "zero_centered", "zero_one"]
only_defective_images=only_defective_images,
augment_data=augment_data,
seed=seed
)
shuffle_buffer_size = 10000
image_dir, csv_file = self._get_data_dirs(training=training)
mask_image_dir = os.path.join(image_dir, "Label")
dataset = tf.data.TextLineDataset(csv_file)
dataset = dataset.skip(1) # Skip CSV Header
if only_defective_images:
dataset = dataset.filter(lambda line: tf.not_equal(tf.strings.substr(line, -1, 1), "0"))
if hvd_utils.is_using_hvd() and training:
dataset = dataset.shard(hvd.size(), hvd.rank())
def _load_dagm_data(line):
input_image_name, image_mask_name, label = tf.decode_csv(
line, record_defaults=[[""], [""], [0]], field_delim=','
)
def decode_image(filepath, resize_shape, normalize_data_method):
image_content = tf.read_file(filepath)
# image = tf.image.decode_image(image_content, channels=resize_shape[-1])
image = tf.image.decode_png(contents=image_content, channels=resize_shape[-1], dtype=tf.uint8)
image = tf.image.resize_images(
image,
size=resize_shape[:2],
method=tf.image.ResizeMethod.BILINEAR, # [BILINEAR, NEAREST_NEIGHBOR, BICUBIC, AREA]
align_corners=False,
preserve_aspect_ratio=True
)
image.set_shape(resize_shape)
image = tf.cast(image, tf.float32)
if normalize_data_method == "zero_centered":
image = tf.divide(image, 127.5) - 1
elif normalize_data_method == "zero_one":
image = tf.divide(image, 255.0)
return image
input_image = decode_image(
filepath=tf.strings.join([image_dir, input_image_name], separator='/'),
resize_shape=input_shape,
normalize_data_method=normalize_data_method,
)
mask_image = tf.cond(
tf.equal(image_mask_name, ""),
true_fn=lambda: tf.zeros(mask_shape, dtype=tf.float32),
false_fn=lambda: decode_image(
filepath=tf.strings.join([mask_image_dir, image_mask_name], separator='/'),
resize_shape=mask_shape,
normalize_data_method="zero_one",
),
)
label = tf.cast(label, tf.int32)
return tf.data.Dataset.from_tensor_slices(([input_image], [mask_image], [label]))
dataset = dataset.apply(
tf.data.experimental.parallel_interleave(
_load_dagm_data,
cycle_length=batch_size*8,
block_length=4,
buffer_output_elements=batch_size*8
)
)
dataset = dataset.cache()
if training:
dataset = dataset.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=shuffle_buffer_size, seed=seed))
else:
dataset = dataset.repeat()
def _augment_data(input_image, mask_image, label):
if augment_data:
if not hvd_utils.is_using_hvd() or hvd.rank() == 0:
print("Using data augmentation ...")
#input_image = tf.image.per_image_standardization(input_image)
horizontal_flip = tf.random_uniform(shape=(), seed=seed) > 0.5
input_image = tf.cond(
horizontal_flip, lambda: tf.image.flip_left_right(input_image), lambda: input_image
)
mask_image = tf.cond(horizontal_flip, lambda: tf.image.flip_left_right(mask_image), lambda: mask_image)
n_rots = tf.random_uniform(shape=(), dtype=tf.int32, minval=0, maxval=3, seed=seed)
input_image = tf.image.rot90(input_image, k=n_rots)
mask_image = tf.image.rot90(mask_image, k=n_rots)
return (input_image, mask_image), label
dataset = dataset.apply(
tf.data.experimental.map_and_batch(
map_func=_augment_data,
num_parallel_calls=num_threads,
batch_size=batch_size,
drop_remainder=True,
)
)
dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
if use_gpu_prefetch:
dataset.apply(tf.data.experimental.prefetch_to_device(device="/gpu:0", buffer_size=4))
return dataset
if __name__ == "__main__":
'''
Data Loading Benchmark Usage:
# Real Data - Training
python -m datasets.dagm2007 \
--data_dir="/data/dagm2007/" \
--batch_size=64 \
--warmup_steps=200 \
--benchmark_steps=2000 \
--training \
--class_id=1
# Real Data - Inference
python -m datasets.dagm2007 \
--data_dir="/data/dagm2007/" \
--batch_size=64 \
--warmup_steps=200 \
--benchmark_steps=2000 \
--class_id=1
# --------------- #
# Synthetic Data - Training
python -m datasets.dagm2007 \
--data_dir="/data/dagm2007/" \
--batch_size=64 \
--warmup_steps=200 \
--benchmark_steps=2000 \
--class_id=1 \
--training \
--use_synthetic_data
# Synthetic Data - Inference
python -m datasets.dagm2007 \
--data_dir="/data/dagm2007/" \
--batch_size=64 \
--warmup_steps=200 \
--benchmark_steps=2000 \
--class_id=1 \
--use_synthetic_data
# --------------- #
'''
import time
import argparse
import numpy as np
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.INFO)
parser = argparse.ArgumentParser(description="DAGM2007_data_loader_benchmark")
parser.add_argument(
'--data_dir', required=True, type=str, help="Directory path which contains the preprocessed DAGM 2007 dataset"
)
parser.add_argument(
'--batch_size', default=64, type=int, required=True, help="""Batch size used to measure performance."""
)
parser.add_argument(
'--warmup_steps',
default=200,
type=int,
required=True,
help="""Number of steps considered as warmup and not taken into account for performance measurements."""
)
parser.add_argument(
'--benchmark_steps',
default=200,
type=int,
required=True,
help="""Number of steps considered as warmup and not taken into account for performance measurements."""
)
parser.add_argument(
'--class_id',
default=1,
choices=range(1, 11), # between 1 and 10
type=int,
required=True,
help="""Class ID used for benchmark."""
)
parser.add_argument("--training", default=False, action="store_true", help="Benchmark in training mode")
parser.add_argument("--use_synthetic_data", default=False, action="store_true", help="Use synthetic dataset")
FLAGS, unknown_args = parser.parse_known_args()
if len(unknown_args) > 0:
for bad_arg in unknown_args:
print("ERROR: Unknown command line arg: %s" % bad_arg)
raise ValueError("Invalid command line arg(s)")
BURNIN_STEPS = FLAGS.warmup_steps
TOTAL_STEPS = FLAGS.warmup_steps + FLAGS.benchmark_steps
dataset = DAGM2007_Dataset(data_dir=FLAGS.data_dir, class_id=FLAGS.class_id)
_filenames, _num_samples, _num_steps, _num_epochs = dataset.get_dataset_runtime_specs(
training=FLAGS.training, iter_unit="batch", num_iter=TOTAL_STEPS, global_batch_size=FLAGS.batch_size
)
tf.logging.info("[*] Executing Benchmark in %s mode" % ("training" if FLAGS.training else "inference"))
tf.logging.info("[*] Benchmark using %s data" % ("synthetic" if FLAGS.use_synthetic_data else "real"))
print()
tf.logging.info("[*] num_samples: %d" % _num_samples)
tf.logging.info("[*] num_steps: %d" % _num_steps)
tf.logging.info("[*] num_epochs: %d" % _num_epochs)
time.sleep(4)
if not FLAGS.use_synthetic_data:
# Build the data input
dataset = dataset.dataset_fn(
batch_size=FLAGS.batch_size,
training=FLAGS.training,
input_shape=(512, 512, 1),
mask_shape=(512, 512, 1),
num_threads=64,
use_gpu_prefetch=True,
seed=None
)
else:
# Build the data input
dataset = dataset.synth_dataset_fn(
batch_size=FLAGS.batch_size,
training=FLAGS.training,
input_shape=(512, 512, 1),
mask_shape=(512, 512, 1),
num_threads=64,
use_gpu_prefetch=True,
seed=None
)
dataset_iterator = dataset.make_initializable_iterator()
(input_images, mask_images), labels = dataset_iterator.get_next()
print("Input Image Shape: %s" % (input_images.get_shape()))
print("Mask Image Shape: %s" % (mask_images.get_shape()))
print("Label Shape: %s" % (labels.get_shape()))
input_images = tf.image.resize_image_with_crop_or_pad(input_images, target_height=512, target_width=512)
with tf.device("/gpu:0"):
input_images = tf.identity(input_images)
mask_images = tf.identity(mask_images)
labels = tf.identity(labels)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
with tf.Session(config=config) as sess:
sess.run(dataset_iterator.initializer)
sess.run(tf.global_variables_initializer())
total_files_processed = 0
img_per_sec_arr = []
processing_time_arr = []
processing_start_time = time.time()
for step in range(TOTAL_STEPS):
start_time = time.time()
img_batch, mask_batch, lbl_batch = sess.run([input_images, mask_images, labels])
batch_size = img_batch.shape[0]
total_files_processed += batch_size
elapsed_time = (time.time() - start_time) * 1000
imgs_per_sec = (batch_size / elapsed_time) * 1000
if (step + 1) > BURNIN_STEPS:
processing_time_arr.append(elapsed_time)
img_per_sec_arr.append(imgs_per_sec)
if (step + 1) % 20 == 0 or (step + 1) == TOTAL_STEPS:
print(
"[STEP %04d] # Files: %03d - Time: %03d msecs - Speed: %6d img/s" %
(step + 1, batch_size, elapsed_time, imgs_per_sec)
)
processing_time = time.time() - processing_start_time
avg_processing_speed = np.mean(img_per_sec_arr)
print("\n###################################################################")
print("*** Data Loading Performance Metrics ***\n")
print("\t=> Number of Steps: %d" % (step + 1))
print("\t=> Batch Size: %d" % FLAGS.batch_size)
print("\t=> Files Processed: %d" % total_files_processed)
print("\t=> Total Execution Time: %d secs" % processing_time)
print("\t=> Median Time per step: %3d msecs" % np.median(processing_time_arr))
print("\t=> Median Processing Speed: %d images/secs" % np.median(img_per_sec_arr))
print("\t=> Median Processing Time: %.2f msecs/image" % (1 / float(np.median(img_per_sec_arr)) * 1000))
print("\n*** Debug Shape Information:")
print(
"\t[*] Batch Shape: %s - Max Val: %.2f - Min Val: %.2f - Mean: %.2f - Stddev: %.2f" % (
str(img_batch.shape), np.max(img_batch), np.min(img_batch), float(np.mean(img_batch)),
float(np.std(img_batch))
)
)
print(
"\t[*] Mask Shape: %s - Max Val: %.2f - Min Val: %.2f - Mean: %.2f - Stddev: %.2f" % (
str(mask_batch.shape), np.max(mask_batch), np.min(mask_batch), float(np.mean(mask_batch)),
float(np.std(mask_batch))
)
)
print(
"\t[*] Label Shape: %s - Max Val: %.2f - Min Val: %.2f" %
(str(lbl_batch.shape), np.max(lbl_batch), np.min(lbl_batch))
)
|
PyTorch/LanguageModeling/BERT/triton/dist6l/runner | runner | config_NVIDIA-DGX-1-(1x-V100-32GB) | checkpoints:
- name: dist-6l-qa
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/bert_pyt_ckpt_distilled_6l_768d_qa_squad11_amp/versions/20.12.0/zip
configurations:
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: none
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: none
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: trt
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
batch_sizes: '1'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 1
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: '1'
- accelerator: none
accelerator_precision: fp16
batch_size:
- 16
batch_sizes: '16'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
- accelerator: none
accelerator_precision: fp16
batch_size:
- 8
batch_sizes: '8'
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: onnx
export_precision: fp16
format: trt
max_batch_size: 8
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 4 8
- accelerator: none
accelerator_precision: fp16
batch_size:
- 1
- 8
- 16
batch_sizes: 1 8 16
capture_cuda_graph: 0
checkpoint_variant: dist-6l-qa
export_format: ts-trace
export_precision: fp16
format: ts-trace
max_batch_size: 16
max_seq_length: 384
precision: fp16
triton_gpu_engine_count: 1
triton_max_queue_delay: 1
triton_preferred_batch_sizes: 8 16
container_version: '21.10'
datasets:
- name: data
datasets_dir: datasets
framework: PyTorch
model_name: BERT
triton_container_image: null
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
Tools/PyTorch/TimeSeriesPredictionPlatform/loggers | loggers | backends | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import atexit
import time
from collections import OrderedDict
from threading import Thread
from queue import Queue
from functools import partial
from typing import Callable
from torch.utils.tensorboard import SummaryWriter
from dllogger import Backend
from distributed_utils import is_parallel
class AverageMeter:
def __init__(self):
self.reset()
def reset(self):
self.updated = False
self.avg = 0
self.sum = 0
self.count = 0
def update(self, value):
self.updated = True
if isinstance(value, (tuple, list)):
val = value[0]
n = value[1]
else:
val = value
n = 1
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
@property
def value(self):
return self.avg
class PerformanceMeter:
def __init__(self):
self.reset()
def reset(self):
self.updated = False
self.start = time.time()
self.n = 0
def update(self, val=1):
self.updated = True
self.n += val
@property
def value(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return time.time() - self.start
class AggregatorBackend(Backend):
def __init__(self, verbosity, agg_dict):
super().__init__(verbosity=verbosity)
self.metrics = OrderedDict({k: v() for k, v in agg_dict.items()})
self.metrics.flushed = True
self.step = 0
self.epoch = 0
self.start_time = time.time()
@property
def log_level(self):
return self._log_level
def metadata(self, timestamp, elapsedtime, metric, metadata):
pass
def _reset_perf_meter(self, name):
for agg in self.metrics[name]:
if isinstance(agg, PerformanceMeter):
agg.reset()
def reset_perf_meters(self):
# This method allows us to reset performance metrics in case we want to
# exclude the first couple of iterations from the performance measurement
for name in self.metrics.keys():
self._reset_perf_meter(name)
def log(self, timestamp, elapsedtime, step, data):
self.step = step
if self.step == []:
self.metrics.flushed = True
if "epoch" in data.keys():
self.epoch = data["epoch"]
for k, v in data.items():
if k not in self.metrics.keys():
continue
self.metrics.flushed = False
self.metrics[k].update(v)
def flush(self):
if self.metrics.flushed:
return
result_string = "Epoch {} | step {} |".format(self.epoch, self.step)
for name, agg in self.metrics.items():
if not agg.updated:
continue
if isinstance(agg, AverageMeter):
_name = "avg " + name
elif isinstance(agg, PerformanceMeter):
_name = name + "/s"
result_string += _name + " {:.3f} |".format(agg.value)
agg.reset()
result_string += "walltime {:.3f} |".format(time.time() - self.start_time)
self.metrics.flushed = True
print(result_string)
class TensorBoardBackend(Backend):
def __init__(self, verbosity, log_dir='.'):
super().__init__(verbosity=verbosity)
self.summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, "TB_summary"), flush_secs=120, max_queue=200)
atexit.register(self.summary_writer.close)
@property
def log_level(self):
return self._log_level
def metadata(self, timestamp, elapsedtime, metric, metadata):
pass
def log(self, timestamp, elapsedtime, step, data):
if not isinstance(step, int):
return
for k, v in data.items():
self.summary_writer.add_scalar(k, v, step)
def flush(self):
pass
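# Illustrative sketch (not part of the original module): how AverageMeter combines
# weighted updates when a (value, count) pair is logged. The numbers are made up.
def _average_meter_example():
    meter = AverageMeter()
    meter.update((0.5, 10))  # mean value 0.5 observed over a batch of 10 samples
    meter.update((0.7, 30))  # mean value 0.7 observed over a batch of 30 samples
    return meter.value       # (0.5 * 10 + 0.7 * 30) / 40 == 0.65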
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets | datasets | list_dataset | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
from PIL import Image
from maskrcnn_benchmark.structures.bounding_box import BoxList
class ListDataset(object):
def __init__(self, image_lists, transforms=None):
self.image_lists = image_lists
self.transforms = transforms
def __getitem__(self, item):
img = Image.open(self.image_lists[item]).convert("RGB")
# dummy target
w, h = img.size
target = BoxList([[0, 0, w, h]], img.size, mode="xyxy")
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.image_lists)
def get_img_info(self, item):
"""
Return the image dimensions for the image, without
loading and pre-processing it
"""
pass
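# Minimal usage sketch (illustrative only; the image paths below are placeholders):
def _list_dataset_example():
    dataset = ListDataset(["/data/images/img_0.jpg", "/data/images/img_1.jpg"])
    image, dummy_target = dataset[0]  # PIL image and a full-image dummy BoxList
    return image.size, dummy_target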
|
PyTorch/Forecasting/TFT/triton/runner/maintainer/docker | docker | container | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
import docker
from docker.models.containers import ExecResult
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..container import Container
class DockerContainer(Container):
def __init__(self, name: str):
super().__init__(name)
self._container = None
self._docker_client = docker.from_env()
self._docker_api_client = docker.APIClient()
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> ExecResult:
"""
Run command inside container
Args:
command: command to execute
Returns:
ExecResult
"""
pass
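# Illustrative sketch of a concrete implementation (the image name and commands below
# are placeholder assumptions, not part of the original runner):
class _ExampleUbuntuContainer(DockerContainer):
    def __init__(self, name: str, image: str = "ubuntu:20.04"):
        super().__init__(name)
        self._image = image

    def start(self):
        # Launch a long-running container so commands can later be executed inside it
        self._container = self._docker_client.containers.run(
            self._image, command="sleep infinity", detach=True
        )

    def stop(self):
        if self._container is not None:
            self._container.stop()
            self._container = None

    def run(self, command: str) -> ExecResult:
        # exec_run returns an ExecResult carrying the exit code and captured output
        return self._container.exec_run(command)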
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | faster_rcnn_inception_v2_pets | # Faster R-CNN with Inception v2, configured for Oxford-IIIT Pets Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
model {
faster_rcnn {
num_classes: 37
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_inception_v2'
first_stage_features_stride: 16
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
initial_crop_size: 14
maxpool_kernel_size: 2
maxpool_stride: 2
second_stage_box_predictor {
mask_rcnn_box_predictor {
use_dropout: false
dropout_keep_probability: 1.0
fc_hyperparams {
op: FC
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
variance_scaling_initializer {
factor: 1.0
uniform: true
mode: FAN_AVG
}
}
}
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
}
}
train_config: {
batch_size: 1
optimizer {
momentum_optimizer: {
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 0.0002
schedule {
step: 900000
learning_rate: .00002
}
schedule {
step: 1200000
learning_rate: .000002
}
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
from_detection_checkpoint: true
load_all_detection_checkpoint_vars: true
# Note: The below line limits the training process to 200K steps, which we
# empirically found to be sufficient to train the pets dataset. This
# effectively bypasses the learning rate schedule (the learning rate will
# never decay). Remove the below line to train indefinitely.
num_steps: 200000
data_augmentation_options {
random_horizontal_flip {
}
}
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/pet_faces_train.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
num_examples: 1101
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/pet_faces_val.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/bermuda | bermuda | onnx | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple(_get_dim(d) for d in shape.dim)
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
# TODO: modifying the ONNX model's inputs/outputs probably causes the error on optimize
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
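# Illustrative end-to-end sketch (defined only, not executed on import; "model.onnx"
# is a placeholder path, not a file shipped with this toolkit):
def _onnx_roundtrip_example(model_path="model.onnx"):
    model = OnnxLoader().load(model_path)
    runner = OnnxRunner()
    with runner.init_inference(model) as session:
        # Feed zero-filled arrays shaped after the model's input specs;
        # dynamic dimensions reported as None are replaced by 1 here.
        dummy_feed = {
            name: np.zeros([d if d is not None else 1 for d in spec.shape], dtype=spec.dtype)
            for name, spec in model.inputs.items()
        }
        return session(dummy_feed)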
|
TensorFlow2/LanguageModeling/BERT | BERT | common_flags | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defining common flags used across all BERT models/applications."""
from absl import flags
import tensorflow as tf
from official.utils.flags import core as flags_core
def define_common_bert_flags():
"""Define common flags for BERT tasks."""
flags_core.define_base(
data_dir=False,
model_dir=True,
clean=False,
train_epochs=False,
epochs_between_evals=False,
stop_threshold=False,
batch_size=False,
num_gpu=True,
hooks=False,
export_dir=False,
distribution_strategy=True,
run_eagerly=True)
flags.DEFINE_string('bert_config_file', None,
'Bert configuration file to define core bert layers.')
flags.DEFINE_string(
'model_export_path', None,
'Path to the directory, where the trained model will be '
'exported.')
flags.DEFINE_string('tpu', '', 'TPU address to connect to.')
flags.DEFINE_string(
'init_checkpoint', None,
'Initial checkpoint (usually from a pre-trained BERT model).')
flags.DEFINE_bool('use_horovod', False, 'Whether to use horovod.')
flags.DEFINE_integer('num_accumulation_steps', 1,
'Number of accumulation steps before gradient update.')
flags.DEFINE_integer('num_train_epochs', 3,
'Total number of training epochs to perform.')
flags.DEFINE_integer(
'steps_per_loop', 200,
'Number of steps per graph-mode loop. Only training step '
'happens inside the loop. Callbacks will not be called '
'inside.')
flags.DEFINE_float('learning_rate', 5e-5,
'The initial learning rate for Adam.')
flags.DEFINE_boolean(
'scale_loss', False,
'Whether to divide the loss by number of replica inside the per-replica '
'loss function.')
flags.DEFINE_boolean(
'use_keras_compile_fit', False,
'If True, uses Keras compile/fit() API for training logic. Otherwise '
'use custom training loop.')
flags.DEFINE_string(
'hub_module_url', None, 'TF-Hub path/url to Bert module. '
'If specified, init_checkpoint flag should not be used.')
flags.DEFINE_enum(
'model_type', 'bert', ['bert', 'albert'],
'Specifies the type of the model. '
'If "bert", will use canonical BERT; if "albert", will use ALBERT model.')
flags.DEFINE_boolean(
'use_fp16', False,
'Whether to use fp32 or fp16 arithmetic on GPU.')
flags.DEFINE_string("optimizer_type", "adam",
"Optimizer used for training - LAMB or ADAM")
flags.DEFINE_integer(
'save_checkpoint_steps', 1000,
'save checkpoint for every n steps')
flags.DEFINE_string(
'dllog_path', 'bert_dllogger.json', 'filename where dllogger writes to')
flags.DEFINE_boolean(
'benchmark', False,
'Benchmark mode.')
# Adds flags for mixed precision training.
flags_core.define_performance(
num_parallel_calls=False,
inter_op=False,
intra_op=False,
synthetic_data=False,
max_train_steps=False,
dtype=True,
dynamic_loss_scale=True,
loss_scale=True,
all_reduce_alg=False,
num_packs=False,
enable_xla=True,
fp16_implementation=True,
)
def use_float16():
return flags_core.get_tf_dtype(flags.FLAGS) == tf.float16
def get_loss_scale():
return flags_core.get_loss_scale(flags.FLAGS, default_for_fp16='dynamic')
|
PyTorch/LanguageModeling/BERT/distillation/utils | utils | convert_ckpts | # coding=utf-8
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import copy
import torch
ckpt = sys.argv[1]
model = torch.load(ckpt)
if "model" in model.keys():
model = model["model"]
torch.save(model, ckpt)
|
PyTorch/LanguageModeling/BART/bart/configuration | configuration | configuration_t5 | # coding=utf-8
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
# Copyright 2010, The T5 Authors and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" T5 model configuration """
from bart.configuration.configuration_utils import PretrainedConfig
from utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-small-config.json",
"t5-base": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-base-config.json",
"t5-large": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-large-config.json",
"t5-3b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-3b-config.json",
"t5-11b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-11b-config.json",
}
class T5Config(PretrainedConfig):
r"""
:class:`~transformers.T5Config` is the configuration class to store the configuration of a
`T5Model`.
Arguments:
vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `T5Model`.
d_model: Size of the encoder layers and the pooler layer. `d_model` can also be accessed via the property `hidden_size`.
num_layers: Number of hidden layers in the Transformer encoder. `num_layers` can also be accessed via the property `num_hidden_layers`.
d_kv: Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model // num_heads`.
d_ff: Size of the intermediate feed forward layer in each `T5Block`.
num_heads: Number of attention heads for each attention layer in
the Transformer encoder. `num_heads` can also be accessed via the property `num_attention_heads`.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
n_positions: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048). `n_positions` can also be accessed via the property `max_position_embeddings`.
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`T5Model`.
initializer_factor: A factor for initializing all weight matrices (should be kept to 1.0, used for initialization testing).
layer_norm_eps: The epsilon used by LayerNorm.
"""
model_type = "t5"
def __init__(
self,
vocab_size=32128,
n_positions=512,
d_model=512,
d_kv=64,
d_ff=2048,
num_layers=6,
num_heads=8,
relative_attention_num_buckets=32,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
initializer_factor=1.0,
is_encoder_decoder=True,
pad_token_id=0,
eos_token_id=1,
**kwargs
):
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
**kwargs,
)
self.vocab_size = vocab_size
self.n_positions = n_positions
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
@property
def max_position_embeddings(self):
return self.n_positions
@property
def hidden_size(self):
return self.d_model
@property
def num_attention_heads(self):
return self.num_heads
@property
def num_hidden_layers(self):
return self.num_layers |
PyTorch/Recommendation/DLRM/dlrm/cuda_src/dot_based_interact | dot_based_interact | dot_based_interact_fp32_bwd | #include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <mma.h>
#include <cuda_fp16.hpp>
#include <math.h>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <vector>
#include <ATen/cuda/CUDAContext.h>
#include <torch/extension.h>
#include "shared_utils.cuh"
using namespace nvcuda;
template <uint THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE) __global__
void dotBasedInteractF32BwdKernelNonAligned(const float *__restrict input,
const float *__restrict upstream_grad,
float *__restrict grad,
float *__restrict bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint input_size,
uint padded_ugrad_size,
uint interaction_ugrad_size) {
extern __shared__ float smem_f32_bwd[];
float *smem_in = &smem_f32_bwd[0];
float *smem_interaction_ugrad = &smem_f32_bwd[input_size]; //skip over the part where we copy in the input
// Input
uint input_batch_offset = blockIdx.x * input_size;
const float *gmem_in = &input[input_batch_offset];
// Gradient
const uint &grad_batch_offset = input_batch_offset;
float *gmem_mlp_grad = &bottom_mlp_grad[blockIdx.x * num_cols]; //where the bottom mlp grad of our sample will land
float *gmem_interaction_grad = &grad[grad_batch_offset]; //where the interaction grads of our sample will land
// Upstream Gradient
uint upstream_grad_batch_offset = blockIdx.x * padded_ugrad_size;
const float *gmem_mlp_ugrad = &upstream_grad[upstream_grad_batch_offset];
// fwd output contained mlp at the start, so the gradient has mlp grad at the start
const float *gmem_interaction_ugrad = &upstream_grad[upstream_grad_batch_offset + num_cols];
// input -> shared memory
for (uint idx = threadIdx.x; idx < input_size; idx += blockDim.x) {
smem_in[idx] = gmem_in[idx];
}
// Interaction Upstream Grad -> Shared Memory
for (uint idx = threadIdx.x; idx < interaction_ugrad_size; idx += blockDim.x) {
smem_interaction_ugrad[idx] = gmem_interaction_ugrad[idx];
}
__syncthreads();
// Copy the upstream gradient w.r.t. the MLP to its corresponding memory location.
for (uint idx = threadIdx.x; idx < num_cols; idx += blockDim.x) {
gmem_mlp_grad[idx] = gmem_mlp_ugrad[idx];
}
for (uint idx = threadIdx.x; idx < num_cols; idx += blockDim.x) {
size_t grad_idx = idx;
// Calculate a single column (1...128) of the output
for (uint row_idx = 0; row_idx < num_rows; row_idx++) {
// Pick a row: now we calculating a single value of the gradient
float sum = 0;
// Jump to our row in (flattened) triangular matrix of upstream gradients
size_t upstream_grad_offset = (row_idx * (row_idx - 1)) >> 1;
// Iterate over all the interactions we took part in
// Sum upstream gradient for that interaction multiplied with the right element of the other vector in the interaction
// We need to do this in two passes because we only keep the triangular part of the matrix, so the row "bends"
for (int k = 0; k < row_idx; k++) {
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + k], sum);
}
for (int k = row_idx + 1; k < num_rows; k++) {
upstream_grad_offset = (k * (k - 1)) >> 1; // TODO: this can become a sum
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + row_idx], sum);
}
gmem_interaction_grad[grad_idx] = sum;
grad_idx += num_cols;
}
}
}
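// Worked example of the triangular indexing used above (illustrative, num_rows = 4):
// the flattened upstream gradient stores the pairs (1,0), (2,0), (2,1), (3,0), (3,1), (3,2),
// i.e. interaction_ugrad_size = 4*3/2 = 6. For row_idx = 2 the first pass starts at
// upstream_grad_offset = (2*1)>>1 = 1, covering its interactions with rows 0 and 1
// (indices 1 and 2); the second pass finds its interaction with row 3 inside row 3's
// segment, at (3*2)>>1 + 2 = 5.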
template <uint THREADBLOCK_SIZE>
__launch_bounds__(THREADBLOCK_SIZE) __global__ void dotBasedInteractF32BwdKernel(const float *__restrict input,
const float *__restrict upstream_grad,
float *__restrict grad,
float *__restrict bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols,
uint input_size,
uint padded_ugrad_size,
uint interaction_ugrad_size) {
// This kernel assumes that:
// input_size is divisible by 4
// num_cols is divisible by 4
extern __shared__ float smem_f32_bwd[];
float *smem_in = &smem_f32_bwd[0];
float *smem_interaction_ugrad = &smem_f32_bwd[input_size];
// Input
uint input_batch_offset = blockIdx.x * input_size;
const float *gmem_in = &input[input_batch_offset];
// Gradient
const uint &grad_batch_offset = input_batch_offset;
float *gmem_mlp_grad = &bottom_mlp_grad[blockIdx.x * num_cols];
float *gmem_interaction_grad = &grad[grad_batch_offset];
// Upstream Gradient
uint upstream_grad_batch_offset = blockIdx.x * padded_ugrad_size;
const float *gmem_mlp_ugrad = &upstream_grad[upstream_grad_batch_offset];
const float *gmem_interaction_ugrad = &upstream_grad[upstream_grad_batch_offset + num_cols];
// input -> shared memory
uint input_size_float4 = input_size >> 2;
for (uint idx = threadIdx.x; idx < input_size_float4; idx += blockDim.x) {
((float4 *)smem_in)[idx] = ((float4 *)gmem_in)[idx];
}
// Interaction Upstream Grad -> Shared Memory
uint upstream_grad_size_float4 = interaction_ugrad_size >> 2;
for (uint idx = threadIdx.x; idx < upstream_grad_size_float4; idx += blockDim.x) {
((float4 *)smem_interaction_ugrad)[idx] = ((float4 *)gmem_interaction_ugrad)[idx];
}
// This may look like dead code, but it is not:
// interaction_ugrad_size is the unpadded size, so it is usually not a multiple of 4.
// This loop copies the part that is left over from the vectorized copy above
uint vectorized_load_offset = (upstream_grad_size_float4 << 2);
for (uint idx = vectorized_load_offset + threadIdx.x; idx < interaction_ugrad_size; idx += blockDim.x) {
smem_interaction_ugrad[idx] = gmem_interaction_ugrad[idx];
}
__syncthreads();
// Copy the upstream gradient w.r.t. the MLP to its corresponding memory location.
for (uint idx = threadIdx.x; idx < (num_cols >> 2); idx += blockDim.x) {
((float4 *)gmem_mlp_grad)[idx] = ((float4 *)gmem_mlp_ugrad)[idx];
}
for (uint idx = threadIdx.x; idx < num_cols; idx += blockDim.x) {
size_t grad_idx = idx;
for (uint row_idx = 0; row_idx < num_rows; row_idx++) {
float sum = 0;
size_t upstream_grad_offset = (row_idx * (row_idx - 1)) >> 1;
for (int k = 0; k < row_idx; k++) {
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + k], sum);
}
for (int k = row_idx + 1; k < num_rows; k++) {
upstream_grad_offset = (k * (k - 1)) >> 1; // TODO: this can become a sum
sum = fmaf(smem_in[k * num_cols + idx], smem_interaction_ugrad[upstream_grad_offset + row_idx], sum);
}
gmem_interaction_grad[grad_idx] = sum;
grad_idx += num_cols;
}
}
}
inline void dotBasedInteractF32Bwd(const void *input,
const void *upstream_grad,
void *grad,
void *bottom_mlp_grad,
uint batch_size,
uint num_rows,
uint num_cols) {
const uint kNumThreads = 128;
uint num_blocks = batch_size;
uint input_size = num_rows * num_cols;
// 1D ugrad size
uint interaction_ugrad_size = num_rows * (num_rows - 1) >> 1; //this IS supposed to be without padding
// this has to be the same padding that we applied in forward
uint unpadded_ugrad_size = num_cols + interaction_ugrad_size;
// this has to be the same padding that we applied in forward
uint padded_ugrad_size = ((unpadded_ugrad_size-1)/8 + 1)*8; //round up to multiple of 8
// input space + upstream grad space
// We copy the whole input plus just the unpadded interaction part of the upstream grad
uint smem_size_elems = input_size + interaction_ugrad_size;
uint smem_size_bytes = smem_size_elems << 2; // F32 Kernel
// padded_ugrad_size is always divisible by 4 because it was just rounded up to a multiple of 8.
bool float4_predicate = !(num_cols & 3);
if (float4_predicate) {
dotBasedInteractF32BwdKernel<kNumThreads>
<<<num_blocks, kNumThreads, smem_size_bytes,
at::cuda::getCurrentCUDAStream()>>>((const float *)input,
(const float *)upstream_grad,
(float *)grad,
(float *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
input_size,
padded_ugrad_size,
interaction_ugrad_size);
} else {
dotBasedInteractF32BwdKernelNonAligned<kNumThreads>
<<<num_blocks, kNumThreads, smem_size_bytes,
at::cuda::getCurrentCUDAStream()>>>((const float *)input,
(const float *)upstream_grad,
(float *)grad,
(float *)bottom_mlp_grad,
batch_size,
num_rows,
num_cols,
input_size,
padded_ugrad_size,
interaction_ugrad_size);
}
}
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/benchmark/data_loader/datasets | datasets | base_dataset | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC
class BaseDataset(ABC):
def get_graph(self, *args, **kwargs):
raise NotImplementedError("`get_graph` fn not implemented")
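# Illustrative subclass sketch (the returned edge list is a made-up placeholder; a real
# implementation would return whatever graph object the benchmark loaders expect):
class _ToyEdgeListDataset(BaseDataset):
    def __init__(self, edges=((0, 1), (1, 2))):
        self._edges = [tuple(e) for e in edges]

    def get_graph(self, *args, **kwargs):
        return self._edges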
|
Kaldi/SpeechRecognition/notebooks | notebooks | Kaldi_TRTIS_inference_online_demo | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Copyright 2019 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;">
#
# # Kaldi TRTIS Inference Online Demo
# ## Overview
#
#
# This repository provides a wrapper around the online GPU-accelerated ASR pipeline from the paper [GPU-Accelerated Viterbi Exact Lattice Decoder for Batched Online and Offline Speech Recognition](https://arxiv.org/abs/1910.10032). That work includes a high-performance implementation of a GPU HMM Decoder, a low-latency Neural Net driver, fast Feature Extraction for preprocessing, and new ASR pipelines tailored for GPUs. These different modules have been integrated into the Kaldi ASR framework.
#
# This repository contains a TensorRT Inference Server custom backend for the Kaldi ASR framework. This custom backend calls the high-performance online GPU pipeline from the Kaldi ASR framework. This TensorRT Inference Server integration provides ease-of-use to Kaldi ASR inference: gRPC streaming server, dynamic sequence batching, and multi-instances support. A client connects to the gRPC server, streams audio by sending chunks to the server, and gets back the inferred text as an answer. More information about the TensorRT Inference Server can be found [here](https://docs.nvidia.com/deeplearning/sdk/tensorrt-inference-server-guide/docs/).
#
#
#
# ### Learning objectives
#
# This notebook demonstrates the steps for carrying out inferencing with the Kaldi TRTIS backend server using a Python gRPC client in an online context, that is, we will stream live audio from a microphone to the inference server and receive the results back.
#
# ## Content
# 1. [Pre-requisite](#1)
# 1. [Setup](#2)
# 1. [Audio helper classes](#3)
# 1. [Inference](#4)
#
# <a id="1"></a>
# ## 1. Pre-requisite
#
#
# ### 1.1 Docker containers
# Follow the steps in [README](README.md) to build Kaldi server and client containers.
#
# ### 1.2 Hardware
# This notebook can be executed on any CUDA-enabled NVIDIA GPU, although for efficient mixed precision inference, a [Tensor Core NVIDIA GPU](https://www.nvidia.com/en-us/data-center/tensorcore/) is desired (Volta, Turing or newer architectures).
# In[1]:
get_ipython().system('nvidia-smi')
# This notebook also requires access to a microphone.
# <a id="2"></a>
# ## 2 Setup
# ### Import libraries and parameters
# In[2]:
import argparse
import numpy as np
import os
import sys
from builtins import range
from functools import partial
import soundfile as sf
import pyaudio as pa
import librosa
import threading
import grpc
from tensorrtserver.api import api_pb2
from tensorrtserver.api import grpc_service_pb2
from tensorrtserver.api import grpc_service_pb2_grpc
import tensorrtserver.api.model_config_pb2 as model_config
# In[3]:
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='Path for input file. First line should contain number of lines to search in')
parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False,
help='Enable verbose output')
parser.add_argument('-a', '--async', dest="async_set", action="store_true", required=False,
default=False, help='Use asynchronous inference API')
parser.add_argument('--streaming', action="store_true", required=False, default=False,
help='Use streaming inference API')
parser.add_argument('-m', '--model-name', type=str, required=False, default='kaldi_online' ,
help='Name of model')
parser.add_argument('-x', '--model-version', type=int, required=False, default=1,
help='Version of model. Default is to use latest version.')
parser.add_argument('-b', '--batch-size', type=int, required=False, default=1,
help='Batch size. Default is 1.')
parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8001',
help='Inference server URL. Default is localhost:8001.')
parser.add_argument('--chunk_duration', type=float, required=False,
default=0.51,
help="duration of the audio chunk for streaming "
"recognition, in seconds")
parser.add_argument('--input_device_id', type=int, required=False,
default=-1, help='Input device id to use to capture audio')
parser.add_argument('--sample_rate', type=int, required=False,
default=16000, help='Sample rate.')
FLAGS = parser.parse_args()
# ### Checking server status
#
# We first query the status of the server. The target model is 'kaldi_online'. A successful deployment of the Kaldi TRTIS server should result in output similar to the below.
#
# ```
# request_status {
# code: SUCCESS
# server_id: "inference:0"
# request_id: 17514
# }
# server_status {
# id: "inference:0"
# version: "1.9.0"
# uptime_ns: 14179155408971
# model_status {
# key: "kaldi_online"
# ...
# ```
# In[4]:
# Create gRPC stub for communicating with the server
channel = grpc.insecure_channel(FLAGS.url)
grpc_stub = grpc_service_pb2_grpc.GRPCServiceStub(channel)
# Prepare request for Status gRPC
request = grpc_service_pb2.StatusRequest(model_name=FLAGS.model_name)
# Call and receive response from Status gRPC
response = grpc_stub.Status(request)
print(response)
# ### Testing microphone
#
# We next identify the input devices in the system. You will need to select a relevant input device amongst the ones listed.
# In[5]:
import pyaudio
import wave
p = pyaudio.PyAudio() # Create an interface to PortAudio
device_info = p.get_host_api_info_by_index(0)
num_devices = device_info.get('deviceCount')
devices = {}
for i in range(0, num_devices):
#if (p.get_device_info_by_host_api_device_index(0, i).get(
# 'maxInputChannels')) > 0:
devices[i] = p.get_device_info_by_host_api_device_index(
0, i)
if (len(devices) == 0):
raise RuntimeError("Cannot find any valid input devices")
print("\nInput Devices:")
for id, info in devices.items():
print("{}: {}".format(id,info.get("name")))
input_device_id = int(input("Enter device id to use: "))
# We then employ the selected device, record from it and play back to verify that everything is in order.
# In[6]:
import pprint
pp = pprint.PrettyPrinter(indent=4)
print("Device info:")
devinfo = p.get_device_info_by_index(input_device_id) # Or whatever device you care about.
pp.pprint(devinfo)
chunk = 1024 # Record in chunks of 1024 samples
sample_format = pyaudio.paInt16 # 16 bits per sample
channels = 1
fs = devinfo['defaultSampleRate'] # Record at device default sampling rate
seconds = 3
filename = "test.wav"
print('Recording')
stream = p.open(format=sample_format,
channels=channels,
rate=int(devinfo["defaultSampleRate"]),
frames_per_buffer=chunk,
input=True,
input_device_index=input_device_id)
frames = [] # Initialize array to store frames
# Store data in chunks for 3 seconds
for i in range(0, int(fs / chunk * seconds)):
data = stream.read(chunk)
frames.append(data)
# Stop and close the stream
stream.stop_stream()
stream.close()
# Terminate the PortAudio interface
# p.terminate()
print('Finished recording')
# Save the recorded data as a WAV file
wf = wave.open(filename, 'wb')
wf.setnchannels(channels)
wf.setsampwidth(p.get_sample_size(sample_format))
wf.setframerate(fs)
wf.writeframes(b''.join(frames))
wf.close()
# In[ ]:
import IPython.display as ipd
ipd.Audio(filename)
# <a id="3"></a>
# ## 3. Audio helper classes
# Next, we define some helper classes for pre-processing audio. The AudioSegment class below takes an audio signal and converts its sampling rate to the one required by the Kaldi ASR model, which is 16000 Hz by default.
#
# Note: For historical reasons, Kaldi expects waveforms in the range (2^15-1)x[-1, 1], not the usual default DSP range [-1, 1]. Therefore, we scale the audio signal by a factor of (2^15-1).
# In[8]:
WAV_SCALE_FACTOR = 2**15-1
class AudioSegment(object):
"""Monaural audio segment abstraction.
:param samples: Audio samples [num_samples x num_channels].
:type samples: ndarray.float32
:param sample_rate: Audio sample rate.
:type sample_rate: int
:raises TypeError: If the sample data type is not float or int.
"""
def __init__(self, samples, sample_rate, target_sr=16000, trim=False,
trim_db=60):
"""Create audio segment from samples.
Samples are converted to float32 internally, with int scaled to [-1, 1].
"""
samples = self._convert_samples_to_float32(samples)
if target_sr is not None and target_sr != sample_rate:
samples = librosa.core.resample(samples, sample_rate, target_sr)
sample_rate = target_sr
if trim:
samples, _ = librosa.effects.trim(samples, trim_db)
self._samples = samples
self._sample_rate = sample_rate
if self._samples.ndim >= 2:
self._samples = np.mean(self._samples, 1)
@staticmethod
def _convert_samples_to_float32(samples):
"""Convert sample type to float32.
Audio sample type is usually integer or floating-point.
Integers will be scaled to [-1, 1] in float32.
"""
float32_samples = samples.astype('float32')
if samples.dtype in np.sctypes['int']:
bits = np.iinfo(samples.dtype).bits
float32_samples *= (1. / ((2 ** (bits - 1)) - 1))
elif samples.dtype in np.sctypes['float']:
pass
else:
raise TypeError("Unsupported sample type: %s." % samples.dtype)
return WAV_SCALE_FACTOR * float32_samples
@classmethod
def from_file(cls, filename, target_sr=16000, offset=0, duration=0,
min_duration=0, trim=False):
"""
Load a file supported by librosa and return as an AudioSegment.
:param filename: path of file to load
:param target_sr: the desired sample rate
:param offset: offset in seconds when loading audio
:param duration: duration in seconds when loading audio
:param min_duration: zero-pad the audio to at least this duration, in seconds
:param trim: if true, trim leading and trailing silence
:return: AudioSegment instance
"""
with sf.SoundFile(filename, 'r') as f:
dtype_options = {'PCM_16': 'int16', 'PCM_32': 'int32', 'FLOAT': 'float32'}
dtype_file = f.subtype
if dtype_file in dtype_options:
dtype = dtype_options[dtype_file]
else:
dtype = 'float32'
sample_rate = f.samplerate
if offset > 0:
f.seek(int(offset * sample_rate))
if duration > 0:
samples = f.read(int(duration * sample_rate), dtype=dtype)
else:
samples = f.read(dtype=dtype)
num_zero_pad = int(target_sr * min_duration - samples.shape[0])
if num_zero_pad > 0:
samples = np.pad(samples, [0, num_zero_pad], mode='constant')
samples = samples.transpose()
return cls(samples, sample_rate, target_sr=target_sr, trim=trim)
@property
def samples(self):
return self._samples.copy()
@property
def sample_rate(self):
return self._sample_rate
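# Illustrative check (not part of the original notebook): Kaldi-style scaling of a
# synthetic int16 signal. For int16 input, the [-1, 1] normalization and the
# (2**15 - 1) scale factor cancel, so samples keep their int16-range magnitudes as float32.
def _audio_segment_scaling_example():
    sig = np.array([0, 2 ** 14, -(2 ** 14)], dtype=np.int16)
    seg = AudioSegment(sig, sample_rate=16000, target_sr=16000)
    return seg.samples  # approximately [0.0, 16384.0, -16384.0]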
# <a id="4"></a>
# ## Inference
#
# We first create an inference context object that connects to the Kaldi TRTIS server via a gRPC connection.
#
# The server expects chunks of audio, each containing up to input.WAV_DATA.dims samples (default: 8160). By default, this corresponds to 510 ms of audio per chunk (i.e. a 16000 Hz sampling rate). The last chunk can be a partial chunk smaller than this maximum value.
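# Quick arithmetic check of the default chunking described above: 8160 samples per chunk
# at a 16 kHz sampling rate is 8160 / 16000 = 0.51 s of audio, which matches the
# default FLAGS.chunk_duration of 0.51.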
# In[9]:
from tensorrtserver.api import *
protocol = ProtocolType.from_str("grpc")
CORRELATION_ID = 11101
ctx = InferContext(FLAGS.url, protocol, FLAGS.model_name, FLAGS.model_version,
correlation_id=CORRELATION_ID, verbose=True,
streaming=False)
# Next, we take chunks of audio (each 510ms in duration, containing 8160 samples) from the microphone and stream them sequentially to the Kaldi server. The server processes each chunk as soon as it is received.
#
# Unlike data from a .wav file, the data taken continuously from the mic has no `end` marker. Therefore, we receive the result once every 10 chunks. Note that the server will reset its status once the result is sent out.
# In[11]:
class TranscribeFromMicrophone:
def __init__(self,input_device_id, target_sr, chunk_duration):
self.recording_state = "init"
self.target_sr = target_sr
self.chunk_duration = chunk_duration
self.p = pa.PyAudio()
device_info = self.p.get_host_api_info_by_index(0)
num_devices = device_info.get('deviceCount')
devices = {}
for i in range(0, num_devices):
if (self.p.get_device_info_by_host_api_device_index(0, i).get(
'maxInputChannels')) > 0:
devices[i] = self.p.get_device_info_by_host_api_device_index(
0, i)
if (len(devices) == 0):
raise RuntimeError("Cannot find any valid input devices")
if input_device_id is None or input_device_id not in \
devices.keys():
print("\nInput Devices:")
for id, info in devices.items():
print("{}: {}".format(id,info.get("name")))
input_device_id = int(input("Enter device id to use: "))
self.input_device_id = input_device_id
devinfo = self.p.get_device_info_by_index(input_device_id)
self.device_default_sr = int(devinfo['defaultSampleRate'])
print("Device sample rate: %d" % self.device_default_sr)
def transcribe_audio(self, streaming=True):
ctx = InferContext(FLAGS.url, protocol, FLAGS.model_name, FLAGS.model_version,
correlation_id=CORRELATION_ID, verbose=True,
streaming=False)
chunk_size = int(self.chunk_duration*self.device_default_sr)
self.recording_state = "init"
def keyboard_listener():
input("**********Press Enter to start and end transcribing...**********")
self.recording_state = "capture"
print("Recording...")
input("")
self.recording_state = "release"
listener = threading.Thread(target=keyboard_listener)
listener.start()
start = True
print("starting....")
stream_initialized = False
audio_signal = 0
audio_segment = 0
end = False
cnt = 0
MAX_CHUNKS = 10
while self.recording_state != "release":
try:
if self.recording_state == "capture":
if not stream_initialized:
stream = self.p.open(
format=pa.paInt16,
channels=1,
rate=self.device_default_sr,
input=True,
input_device_index=self.input_device_id,
frames_per_buffer=chunk_size)
stream_initialized = True
# Read an audio chunk from microphone
audio_signal = stream.read(chunk_size, exception_on_overflow = False)
if self.recording_state == "release":
break
end = True
audio_signal = np.frombuffer(audio_signal,dtype=np.int16)
audio_segment = AudioSegment(audio_signal,
self.device_default_sr,
self.target_sr)
if cnt == MAX_CHUNKS:
end = True
if cnt > 1:
start = False
# Inference
flags = InferRequestHeader.FLAG_NONE
x = (audio_segment.samples, self.target_sr, start, end)
if x[2]:
flags = flags | InferRequestHeader.FLAG_SEQUENCE_START
if x[3]:
flags = flags | InferRequestHeader.FLAG_SEQUENCE_END
if not end:
ctx.run({'WAV_DATA' : (x[0],),
'WAV_DATA_DIM' : (np.full(shape=1, fill_value=len(x[0]), dtype=np.int32),)},
{},
batch_size=1,
flags=flags,
corr_id=CORRELATION_ID)
else:
res = ctx.run({'WAV_DATA' : (x[0],),
'WAV_DATA_DIM' : (np.full(shape=1, fill_value=len(x[0]), dtype=np.int32),)},
{ 'TEXT' : InferContext.ResultFormat.RAW },
batch_size=1,
flags=flags,
corr_id=CORRELATION_ID)
print("".join([x.decode('utf-8') for x in res['TEXT'][0]]))
if cnt == MAX_CHUNKS: # reset server
start = True
end = False
cnt = 0
cnt += 1
sys.stdout.write("\r" + "."*cnt)
sys.stdout.flush()
except Exception as e:
print(e)
break
stream.close()
self.p.terminate()
# In[12]:
transcriber = TranscribeFromMicrophone(input_device_id,
target_sr=FLAGS.sample_rate,
chunk_duration=FLAGS.chunk_duration)
# After executing the cell below and pressing ENTER, the client starts recording chunks of audio from the specified microphone and streams them continuously to the server. After every 10 chunks, the client retrieves and displays the results, and the server status is reset, i.e., the server treats the next chunk as the start of a fresh request.
# Pressing ENTER again stops the client.
#
#
# In[ ]:
transcriber.transcribe_audio()
# # Conclusion
#
# In this notebook, we have walked through the complete process of capturing audio data from a microphone and carrying out inference with the Kaldi ASR model.
#
# ## What's next
# Now it's time to try the Kaldi ASR model on your own data. The online client can also be further improved, for example, by detecting natural breaks in the input stream (e.g., silence) to segment sentences more naturally; a minimal sketch of such a detector follows below.
#
# In[ ]:
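# A minimal, hypothetical sketch of such an improvement: an energy-based silence detector
# that could replace the fixed MAX_CHUNKS counter when deciding where to set the
# FLAG_SEQUENCE_END flag. The threshold and the number of consecutive silent chunks below
# are assumptions and would need tuning for a real microphone and room.
import numpy as np

def is_silent(chunk, threshold=0.01):
    """Return True if the RMS energy of an int16 audio chunk is below threshold."""
    samples = np.asarray(chunk, dtype=np.float32) / 32768.0  # scale int16 to [-1, 1]
    rms = np.sqrt(np.mean(samples ** 2)) if samples.size else 0.0
    return rms < threshold

# Possible use inside the capture loop (pseudocode, not wired into the class above):
# silent_run = silent_run + 1 if is_silent(audio_signal) else 0
# end = silent_run >= 3  # close the sequence after roughly 1.5 s of silence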
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt | tft_pyt | train | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import time
import os
import pickle
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.utils.data import DataLoader, DistributedSampler, RandomSampler
from apex import amp
from apex.optimizers import FusedAdam
#from torch.nn.parallel import DistributedDataParallel as DDP
from apex.parallel import DistributedDataParallel as DDP
import numpy as np
import dllogger
from modeling import TemporalFusionTransformer
from configuration import CONFIGS
from data_utils import TFTBinaryDataset, sample_data
from log_helper import setup_logger
from criterions import QuantileLoss
from inference import predict
from utils import PerformanceMeter
import gpu_affinity
from ema import ModelEma
def load_dataset(args, config):
train_split = TFTBinaryDataset(os.path.join(args.data_path, 'train.bin'), config)
train_split = sample_data(train_split, args.sample_data[0])
if args.distributed_world_size > 1:
data_sampler = DistributedSampler(train_split, args.distributed_world_size, args.distributed_rank, seed=args.seed + args.distributed_rank, drop_last=True)
else:
data_sampler = RandomSampler(train_split)
train_loader = DataLoader(train_split, batch_size=args.batch_size, num_workers=4, sampler=data_sampler, pin_memory=True)
valid_split = TFTBinaryDataset(os.path.join(args.data_path, 'valid.bin'), config)
valid_split = sample_data(valid_split, args.sample_data[1])
if args.distributed_world_size > 1:
data_sampler = DistributedSampler(valid_split, args.distributed_world_size, args.distributed_rank, shuffle=False, drop_last=False)
else:
data_sampler = None
valid_loader = DataLoader(valid_split, batch_size=args.batch_size, sampler=data_sampler, num_workers=4, pin_memory=True)
test_split = TFTBinaryDataset(os.path.join(args.data_path, 'test.bin'), config)
if args.distributed_world_size > 1:
data_sampler = DistributedSampler(test_split, args.distributed_world_size, args.distributed_rank, shuffle=False, drop_last=False)
else:
data_sampler = None
test_loader = DataLoader(test_split, batch_size=args.batch_size, sampler=data_sampler, num_workers=4, pin_memory=True)
print_once(f'Train split length: {len(train_split)}')
print_once(f'Valid split length: {len(valid_split)}')
print_once(f'Test split length: {len(test_split)}')
return train_loader, valid_loader, test_loader
def print_once(*args, **kwargs):
if not dist.is_initialized() or dist.get_rank() == 0:
print(*args, **kwargs)
def main(args):
### INIT DISTRIBUTED
args.distributed_world_size = int(os.environ.get('WORLD_SIZE', 1))
args.local_rank = int(os.environ.get('LOCAL_RANK', 0))
if args.distributed_world_size > 1:
dist.init_process_group(backend='nccl', init_method='env://')
print_once(f'Distributed training with {args.distributed_world_size} GPUs')
args.distributed_rank = dist.get_rank()
torch.cuda.set_device(args.local_rank)
torch.cuda.synchronize()
# Enable CuDNN autotuner
nproc_per_node = torch.cuda.device_count()
if args.affinity != 'disabled':
affinity = gpu_affinity.set_affinity(
args.local_rank,
nproc_per_node,
args.affinity
)
print(f'{args.local_rank}: thread affinity: {affinity}')
torch.backends.cudnn.benchmark = True
if args.seed:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
setup_logger(args)
config = CONFIGS[args.dataset]()
if args.overwrite_config:
config.__dict__.update(json.loads(args.overwrite_config))
dllogger.log(step='HPARAMS', data={**vars(args), **vars(config)}, verbosity=1)
model = TemporalFusionTransformer(config).cuda()
if args.ema_decay:
model_ema = ModelEma(model, decay=args.ema_decay)
print_once('Model params: {}'.format(sum(p.numel() for p in model.parameters())))
criterion = QuantileLoss(config).cuda()
optimizer = FusedAdam(model.parameters(), lr=args.lr)
if args.use_amp:
model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale="dynamic")
if args.distributed_world_size > 1:
#model = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
model = DDP(model)
train_loader, valid_loader, test_loader = load_dataset(args, config)
global_step = 0
perf_meter = PerformanceMeter()
for epoch in range(args.epochs):
start = time.time()
dllogger.log(step=global_step, data={'epoch': epoch}, verbosity=1)
model.train()
for local_step, batch in enumerate(train_loader):
perf_meter.reset_current_lap()
batch = {key: tensor.cuda() if tensor.numel() else None for key, tensor in batch.items()}
predictions = model(batch)
targets = batch['target'][:,config.encoder_length:,:]
p_losses = criterion(predictions, targets)
loss = p_losses.sum()
if args.use_amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if not args.grad_accumulation or (global_step+1) % args.grad_accumulation == 0:
if args.clip_grad:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad)
optimizer.step()
optimizer.zero_grad()
if args.ema_decay:
model_ema.update(model)
if args.distributed_world_size > 1:
dist.all_reduce(p_losses)
p_losses /= args.distributed_world_size
loss = p_losses.sum()
torch.cuda.synchronize()
ips = perf_meter.update(args.batch_size * args.distributed_world_size,
exclude_from_total=local_step in [0, len(train_loader)-1])
log_dict = {'P10':p_losses[0].item(), 'P50':p_losses[1].item(), 'P90':p_losses[2].item(), 'loss': loss.item(), 'items/s':ips}
dllogger.log(step=global_step, data=log_dict, verbosity=1)
global_step += 1
validate(args, config, model_ema if args.ema_decay else model, criterion, valid_loader, global_step)
if validate.early_stop_c >= args.early_stopping:
print_once('Early stopping')
break
### TEST PHASE ###
state_dict = torch.load(os.path.join(args.results, 'checkpoint.pt'), map_location='cpu')
if isinstance(model, DDP):
model.module.load_state_dict(state_dict['model'])
else:
model.load_state_dict(state_dict['model'])
model.cuda().eval()
tgt_scalers = pickle.load(open(os.path.join(args.data_path, 'tgt_scalers.bin'), 'rb'))
cat_encodings = pickle.load(open(os.path.join(args.data_path,'cat_encodings.bin'), 'rb'))
unscaled_predictions, unscaled_targets, _, _ = predict(args, config, model, test_loader, tgt_scalers, cat_encodings)
losses = QuantileLoss(config)(unscaled_predictions, unscaled_targets)
normalizer = unscaled_targets.abs().mean()
quantiles = 2 * losses / normalizer
if args.distributed_world_size > 1:
quantiles = quantiles.cuda()
dist.all_reduce(quantiles)
quantiles /= args.distributed_world_size
quantiles = {'test_p10': quantiles[0].item(), 'test_p50': quantiles[1].item(), 'test_p90': quantiles[2].item(), 'sum':sum(quantiles).item()}
finish_log = {**quantiles, 'average_ips':perf_meter.avg, 'convergence_step':validate.conv_step}
dllogger.log(step=(), data=finish_log, verbosity=1)
def validate(args, config, model, criterion, dataloader, global_step):
if not hasattr(validate, 'best_valid_loss'):
validate.best_valid_loss = float('inf')
if not hasattr(validate, 'early_stop_c'):
validate.early_stop_c = 0
model.eval()
losses = []
validation_start = time.time()
for batch in dataloader:
with torch.no_grad():
batch = {key: tensor.cuda() if tensor.numel() else None for key, tensor in batch.items()}
predictions = model(batch)
targets = batch['target'][:,config.encoder_length:,:]
p_losses = criterion(predictions, targets)
bs = next(t for t in batch.values() if t is not None).shape[0]
losses.append((p_losses, bs))
validation_end = time.time()
p_losses = sum([l[0]*l[1] for l in losses])/sum([l[1] for l in losses]) # takes into account that the last batch may not be full
if args.distributed_world_size > 1:
dist.all_reduce(p_losses)
p_losses = p_losses/args.distributed_world_size
ips = len(dataloader.dataset) / (validation_end - validation_start)
log_dict = {'P10':p_losses[0].item(), 'P50':p_losses[1].item(), 'P90':p_losses[2].item(), 'loss': p_losses.sum().item(), 'items/s':ips}
if log_dict['loss'] < validate.best_valid_loss:
validate.best_valid_loss = log_dict['loss']
validate.early_stop_c = 0
validate.conv_step = global_step
if not dist.is_initialized() or dist.get_rank() == 0:
state_dict = model.module.state_dict() if isinstance(model, (DDP, ModelEma)) else model.state_dict()
ckpt = {'args':args, 'config':config, 'model':state_dict}
torch.save(ckpt, os.path.join(args.results, 'checkpoint.pt'))
if args.distributed_world_size > 1:
dist.barrier()
else:
validate.early_stop_c += 1
log_dict = {'val_'+k:v for k,v in log_dict.items()}
dllogger.log(step=global_step, data=log_dict, verbosity=1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_path', type=str, required=True,
help='Path to the dataset')
parser.add_argument('--dataset', type=str, required=True, choices=CONFIGS.keys(),
help='Dataset name')
parser.add_argument('--epochs', type=int, default=25,
help='Default number of training epochs')
parser.add_argument('--sample_data', type=lambda x: int(float(x)), nargs=2, default=[-1, -1],
help="""Subsample the dataset. Specify number of training and valid examples.
Values can be provided in scientific notation. Floats will be truncated.""")
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--use_amp', action='store_true', help='Enable automatic mixed precision')
parser.add_argument('--clip_grad', type=float, default=0.0)
parser.add_argument('--grad_accumulation', type=int, default=0)
parser.add_argument('--early_stopping', type=int, default=1000,
help='Stop training if validation loss does not improve for more than this number of epochs.')
parser.add_argument('--results', type=str, default='/results',
help='Directory in which results are stored')
parser.add_argument('--log_file', type=str, default='dllogger.json',
help='Name of dllogger output file')
parser.add_argument('--overwrite_config', type=str, default='',
help='JSON string used to overload config')
parser.add_argument('--affinity', type=str,
default='socket_unique_interleaved',
choices=['socket', 'single', 'single_unique',
'socket_unique_interleaved',
'socket_unique_continuous',
'disabled'],
help='type of CPU affinity')
parser.add_argument("--ema_decay", type=float, default=0.0, help='Use exponential moving average')
ARGS = parser.parse_args()
main(ARGS)
|
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules | modules | transpose_last | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
transpose last 2 dimensions of the input
"""
import torch.nn as nn
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
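# A small usage example (not part of the original module): TransposeLast swaps the last
# two dimensions, e.g. (batch, channels, time) -> (batch, time, channels).
if __name__ == "__main__":
    import torch

    x = torch.randn(4, 80, 100)
    assert TransposeLast()(x).shape == (4, 100, 80)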
|
TensorFlow2/LanguageModeling/BERT/scripts/configs | configs | pretrain_config | #!/usr/bin/env bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Full LAMB pretraining configs for NVIDIA DGX A100 (8x NVIDIA A100 40GB GPU)
dgxa100_8gpu_fp16 ()
{
train_batch_size_phase1=312
train_batch_size_phase2=40
eval_batch_size=8
learning_rate_phase1="8.12e-4"
learning_rate_phase2="5e-4"
precision="fp16"
use_xla="true"
num_gpus=8
warmup_steps_phase1=2000
warmup_steps_phase2=200
train_steps=6416
save_checkpoints_steps=100
num_accumulation_steps_phase1=32
num_accumulation_steps_phase2=96
echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus $warmup_steps_phase1 $warmup_steps_phase2 $train_steps $save_checkpoints_steps $num_accumulation_steps_phase1 $num_accumulation_steps_phase2
}
dgxa100_8gpu_tf32 ()
{
train_batch_size_phase1=176
train_batch_size_phase2=22
eval_batch_size=8
learning_rate_phase1="7.5e-4"
learning_rate_phase2="5e-4"
precision="tf32"
use_xla="true"
num_gpus=8
warmup_steps_phase1=2000
warmup_steps_phase2=200
train_steps=5687
save_checkpoints_steps=100
num_accumulation_steps_phase1=64
num_accumulation_steps_phase2=192
echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus $warmup_steps_phase1 $warmup_steps_phase2 $train_steps $save_checkpoints_steps $num_accumulation_steps_phase1 $num_accumulation_steps_phase2
}
# Full LAMB pretraining configs for NVIDIA DGX-2H (16x NVIDIA V100 32GB GPU)
dgx2_16gpu_fp16 ()
{
train_batch_size_phase1=60
train_batch_size_phase2=10
eval_batch_size=8
learning_rate_phase1="3.75e-4"
learning_rate_phase2="2.5e-4"
precision="fp16"
use_xla="true"
num_gpus=16
warmup_steps_phase1=2133
warmup_steps_phase2=213
train_steps=8341
save_checkpoints_steps=100
num_accumulation_steps_phase1=64
num_accumulation_steps_phase2=192
echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus $warmup_steps_phase1 $warmup_steps_phase2 $train_steps $save_checkpoints_steps $num_accumulation_steps_phase1 $num_accumulation_steps_phase2
}
dgx2_16gpu_fp32 ()
{
train_batch_size_phase1=32
train_batch_size_phase2=6
eval_batch_size=8
learning_rate_phase1="3.75e-4"
learning_rate_phase2="2.5e-4"
precision="fp32"
use_xla="true"
num_gpus=16
warmup_steps_phase1=2000
warmup_steps_phase2=200
train_steps=7820
save_checkpoints_steps=100
num_accumulation_steps_phase1=128
num_accumulation_steps_phase2=320
echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus $warmup_steps_phase1 $warmup_steps_phase2 $train_steps $save_checkpoints_steps $num_accumulation_steps_phase1 $num_accumulation_steps_phase2
}
# Full LAMB pretraining configs for NVIDIA DGX-1 (8x NVIDIA V100 32GB GPU)
dgx1_8gpu_fp16 ()
{
train_batch_size_phase1=60
train_batch_size_phase2=10
eval_batch_size=8
learning_rate_phase1="7.5e-4"
learning_rate_phase2="5e-4"
precision="fp16"
use_xla="true"
num_gpus=8
warmup_steps_phase1=2133
warmup_steps_phase2=213
train_steps=8341
save_checkpoints_steps=100
num_accumulation_steps_phase1=128
num_accumulation_steps_phase2=384
echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus $warmup_steps_phase1 $warmup_steps_phase2 $train_steps $save_checkpoints_steps $num_accumulation_steps_phase1 $num_accumulation_steps_phase2
}
dgx1_8gpu_fp32 ()
{
train_batch_size_phase1=32
train_batch_size_phase2=6
eval_batch_size=8
learning_rate_phase1="7.5e-4"
learning_rate_phase2="5e-4"
precision="fp32"
use_xla="true"
num_gpus=8
warmup_steps_phase1=2000
warmup_steps_phase2=200
train_steps=7820
save_checkpoints_steps=100
num_accumulation_steps_phase1=256
num_accumulation_steps_phase2=640
echo $train_batch_size_phase1 $train_batch_size_phase2 $eval_batch_size $learning_rate_phase1 $learning_rate_phase2 $precision $use_xla $num_gpus $warmup_steps_phase1 $warmup_steps_phase2 $train_steps $save_checkpoints_steps $num_accumulation_steps_phase1 $num_accumulation_steps_phase2
}
|
PyTorch/SpeechSynthesis/HiFiGAN/scripts | scripts | inference_benchmark | #!/usr/bin/env bash
export CUDNN_V8_API_ENABLED=1 # Keep the flag for older containers
export TORCH_CUDNN_V8_API_ENABLED=1
set -a
: ${AMP:=false}
: ${CUDNN_BENCHMARK:=true}
: ${FILELIST:="data/filelists/benchmark_8_128.tsv"}
: ${OUTPUT_DIR:="./results"}
: ${TORCHSCRIPT:=true}
: ${REPEATS:=200}
: ${WARMUP:=100}
: ${DENOISING:=0.0}
: ${BATCH_SIZE:=1} # 1 2 4 8
LOG_FILE="$OUTPUT_DIR"/perf-infer_amp-${AMP}_bs${BATCH_SIZE}
LOG_FILE+=_denoising${DENOISING}
LOG_FILE+=_torchscript-${TORCHSCRIPT}
LOG_FILE+=.json
bash scripts/inference_example.sh "$@"
|
TensorFlow/Translation/GNMT/examples | examples | DGX1_AMP_1GPU | python nmt.py --output_dir=results --batch_size=128 --learning_rate=5e-4 --amp
|
PyTorch/Classification/ConvNets/image_classification/models | models | common | import copy
from collections import OrderedDict
from dataclasses import dataclass
from typing import Optional
import torch
import warnings
from torch import nn
import torch.nn.functional as F
try:
from pytorch_quantization import nn as quant_nn
except ImportError as e:
warnings.warn(
"pytorch_quantization module not found, quantization will not be available"
)
quant_nn = None
# LayerBuilder {{{
class LayerBuilder(object):
@dataclass
class Config:
activation: str = "relu"
conv_init: str = "fan_in"
bn_momentum: Optional[float] = None
bn_epsilon: Optional[float] = None
def __init__(self, config: "LayerBuilder.Config"):
self.config = config
def conv(
self,
kernel_size,
in_planes,
out_planes,
groups=1,
stride=1,
bn=False,
zero_init_bn=False,
act=False,
):
conv = nn.Conv2d(
in_planes,
out_planes,
kernel_size=kernel_size,
groups=groups,
stride=stride,
padding=int((kernel_size - 1) / 2),
bias=False,
)
nn.init.kaiming_normal_(
conv.weight, mode=self.config.conv_init, nonlinearity="relu"
)
layers = [("conv", conv)]
if bn:
layers.append(("bn", self.batchnorm(out_planes, zero_init_bn)))
if act:
layers.append(("act", self.activation()))
if bn or act:
return nn.Sequential(OrderedDict(layers))
else:
return conv
def convDepSep(
self, kernel_size, in_planes, out_planes, stride=1, bn=False, act=False
):
"""3x3 depthwise separable convolution with padding"""
c = self.conv(
kernel_size,
in_planes,
out_planes,
groups=in_planes,
stride=stride,
bn=bn,
act=act,
)
return c
def conv3x3(self, in_planes, out_planes, stride=1, groups=1, bn=False, act=False):
"""3x3 convolution with padding"""
c = self.conv(
3, in_planes, out_planes, groups=groups, stride=stride, bn=bn, act=act
)
return c
def conv1x1(self, in_planes, out_planes, stride=1, groups=1, bn=False, act=False):
"""1x1 convolution with padding"""
c = self.conv(
1, in_planes, out_planes, groups=groups, stride=stride, bn=bn, act=act
)
return c
def conv7x7(self, in_planes, out_planes, stride=1, groups=1, bn=False, act=False):
"""7x7 convolution with padding"""
c = self.conv(
7, in_planes, out_planes, groups=groups, stride=stride, bn=bn, act=act
)
return c
def conv5x5(self, in_planes, out_planes, stride=1, groups=1, bn=False, act=False):
"""5x5 convolution with padding"""
c = self.conv(
5, in_planes, out_planes, groups=groups, stride=stride, bn=bn, act=act
)
return c
def batchnorm(self, planes, zero_init=False):
bn_cfg = {}
if self.config.bn_momentum is not None:
bn_cfg["momentum"] = self.config.bn_momentum
if self.config.bn_epsilon is not None:
bn_cfg["eps"] = self.config.bn_epsilon
bn = nn.BatchNorm2d(planes, **bn_cfg)
gamma_init_val = 0 if zero_init else 1
nn.init.constant_(bn.weight, gamma_init_val)
nn.init.constant_(bn.bias, 0)
return bn
def activation(self):
return {
"silu": lambda: nn.SiLU(inplace=True),
"relu": lambda: nn.ReLU(inplace=True),
"onnx-silu": ONNXSiLU,
}[self.config.activation]()
# LayerBuilder }}}
# LambdaLayer {{{
class LambdaLayer(nn.Module):
def __init__(self, lmbd):
super().__init__()
self.lmbd = lmbd
def forward(self, x):
return self.lmbd(x)
# }}}
# SqueezeAndExcitation {{{
class SqueezeAndExcitation(nn.Module):
def __init__(self, in_channels, squeeze, activation):
super(SqueezeAndExcitation, self).__init__()
self.squeeze = nn.Linear(in_channels, squeeze)
self.expand = nn.Linear(squeeze, in_channels)
self.activation = activation
self.sigmoid = nn.Sigmoid()
def forward(self, x):
return self._attention(x)
def _attention(self, x):
out = torch.mean(x, [2, 3])
out = self.squeeze(out)
out = self.activation(out)
out = self.expand(out)
out = self.sigmoid(out)
out = out.unsqueeze(2).unsqueeze(3)
return out
class SqueezeAndExcitationTRT(nn.Module):
def __init__(self, in_channels, squeeze, activation):
super(SqueezeAndExcitationTRT, self).__init__()
self.pooling = nn.AdaptiveAvgPool2d(1)
self.squeeze = nn.Conv2d(in_channels, squeeze, 1)
self.expand = nn.Conv2d(squeeze, in_channels, 1)
self.activation = activation
self.sigmoid = nn.Sigmoid()
def forward(self, x):
return self._attention(x)
def _attention(self, x):
out = self.pooling(x)
out = self.squeeze(out)
out = self.activation(out)
out = self.expand(out)
out = self.sigmoid(out)
return out
# }}}
# EMA {{{
class EMA:
def __init__(self, mu, module_ema):
self.mu = mu
self.module_ema = module_ema
def __call__(self, module, step=None):
if step is None:
mu = self.mu
else:
mu = min(self.mu, (1.0 + step) / (10 + step))
def strip_module(s: str) -> str:
return s
mesd = self.module_ema.state_dict()
with torch.no_grad():
for name, x in module.state_dict().items():
if name.endswith("num_batches_tracked"):
continue
n = strip_module(name)
mesd[n].mul_(mu)
mesd[n].add_((1.0 - mu) * x)
# }}}
# ONNXSiLU {{{
# Since torch.nn.SiLU is not supported in ONNX, this implementation must be used in
# exported models (it needs 15-20% more GPU memory).
class ONNXSiLU(nn.Module):
def __init__(self, *args, **kwargs):
super(ONNXSiLU, self).__init__()
def forward(self, x):
return x * torch.sigmoid(x)
# }}}
class SequentialSqueezeAndExcitation(SqueezeAndExcitation):
def __init__(self, in_channels, squeeze, activation, quantized=False):
super().__init__(in_channels, squeeze, activation)
self.quantized = quantized
if quantized:
assert quant_nn is not None, "pytorch_quantization is not available"
self.mul_a_quantizer = quant_nn.TensorQuantizer(
quant_nn.QuantConv2d.default_quant_desc_input
)
self.mul_b_quantizer = quant_nn.TensorQuantizer(
quant_nn.QuantConv2d.default_quant_desc_input
)
else:
self.mul_a_quantizer = nn.Identity()
self.mul_b_quantizer = nn.Identity()
def forward(self, x):
out = self._attention(x)
if not self.quantized:
return out * x
else:
x_quant = self.mul_a_quantizer(out)
return x_quant * self.mul_b_quantizer(x)
class SequentialSqueezeAndExcitationTRT(SqueezeAndExcitationTRT):
def __init__(self, in_channels, squeeze, activation, quantized=False):
super().__init__(in_channels, squeeze, activation)
self.quantized = quantized
if quantized:
assert quant_nn is not None, "pytorch_quantization is not available"
self.mul_a_quantizer = quant_nn.TensorQuantizer(
quant_nn.QuantConv2d.default_quant_desc_input
)
self.mul_b_quantizer = quant_nn.TensorQuantizer(
quant_nn.QuantConv2d.default_quant_desc_input
)
else:
self.mul_a_quantizer = nn.Identity()
self.mul_b_quantizer = nn.Identity()
def forward(self, x):
out = self._attention(x)
if not self.quantized:
return out * x
else:
x_quant = self.mul_a_quantizer(out)
return x_quant * self.mul_b_quantizer(x)
class StochasticDepthResidual(nn.Module):
def __init__(self, survival_prob: float):
super().__init__()
self.survival_prob = survival_prob
self.register_buffer("mask", torch.ones(()), persistent=False)
def forward(self, residual: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
if not self.training:
return torch.add(residual, other=x)
else:
with torch.no_grad():
mask = F.dropout(
self.mask,
p=1 - self.survival_prob,
training=self.training,
inplace=False,
)
return torch.addcmul(residual, mask, x)
class Flatten(nn.Module):
def forward(self, x: torch.Tensor) -> torch.Tensor:
return x.squeeze(-1).squeeze(-1)
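# A small, self-contained sanity check (not part of the original module), runnable as a
# script: ONNXSiLU is expected to match torch.nn.SiLU numerically, and the squeeze-and-
# excitation block returns a per-channel gate of shape (N, C, 1, 1) that broadcasts over
# the spatial dimensions.
if __name__ == "__main__":
    x = torch.randn(2, 8, 4, 4)
    assert torch.allclose(ONNXSiLU()(x), nn.SiLU()(x), atol=1e-6)
    se = SqueezeAndExcitation(in_channels=8, squeeze=2, activation=nn.ReLU())
    assert se(x).shape == (2, 8, 1, 1)
    print("common.py sanity checks passed")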
|
PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit | deployment_toolkit | args | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from typing import Any, Callable, Dict, Optional, Union
from .core import GET_ARGPARSER_FN_NAME, load_from_file
LOGGER = logging.getLogger(__name__)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict:
signature = inspect.signature(fn)
parameters_names = list(signature.parameters)
if isinstance(args, argparse.Namespace):
args = vars(args)
args = {k: v for k, v in args.items() if k in parameters_names}
return args
def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser:
parser.conflict_handler = "resolve"
signature = inspect.signature(fn)
for parameter in signature.parameters.values():
if parameter.name in ["self", "args", "kwargs"]:
continue
argument_kwargs = {}
if parameter.annotation != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["type"] = str2bool
argument_kwargs["choices"] = [0, 1]
elif type(parameter.annotation) == type(Union): # isinstance(parameter.annotation, type(Optional[Any])):
types = [type_ for type_ in parameter.annotation.__args__ if not isinstance(None, type_)]
if len(types) != 1:
raise RuntimeError(
f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}"
)
argument_kwargs["type"] = types[0]
else:
argument_kwargs["type"] = parameter.annotation
if parameter.default != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["default"] = str2bool(parameter.default)
else:
argument_kwargs["default"] = parameter.default
else:
argument_kwargs["required"] = True
name = parameter.name.replace("_", "-")
LOGGER.debug(f"Adding argument {name} with {argument_kwargs}")
parser.add_argument(f"--{name}", **argument_kwargs)
return parser
class ArgParserGenerator:
def __init__(self, cls_or_fn, module_path: Optional[str] = None):
self._cls_or_fn = cls_or_fn
self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, "__init__")
input_is_python_file = module_path and module_path.endswith(".py")
self._input_path = module_path if input_is_python_file else None
self._required_fn_name_for_signature_parsing = getattr(
cls_or_fn, "required_fn_name_for_signature_parsing", None
)
def update_argparser(self, parser):
name = self._handle.__name__
group_parser = parser.add_argument_group(name)
add_args_for_fn_signature(group_parser, fn=self._handle)
self._update_argparser(group_parser)
def get_args(self, args: argparse.Namespace):
filtered_args = filter_fn_args(args, fn=self._handle)
tmp_parser = argparse.ArgumentParser(allow_abbrev=False)
self._update_argparser(tmp_parser)
custom_names = [
p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction)
]
custom_params = {n: getattr(args, n) for n in custom_names}
filtered_args = {**filtered_args, **custom_params}
return filtered_args
def from_args(self, args: Union[argparse.Namespace, Dict]):
args = self.get_args(args)
LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})")
return self._cls_or_fn(**args)
def _update_argparser(self, parser):
label = "argparser_update"
if self._input_path:
update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME)
if update_argparser_handle:
update_argparser_handle(parser)
elif self._required_fn_name_for_signature_parsing:
fn_handle = load_from_file(
self._input_path, label=label, target=self._required_fn_name_for_signature_parsing
)
if fn_handle:
add_args_for_fn_signature(parser, fn_handle)
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | i3d_utils | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for building I3D network models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# Originally, add_arg_scope = slim.add_arg_scope and layers = slim; now switch to
# the more up-to-date tf.contrib.* API.
add_arg_scope = tf.contrib.framework.add_arg_scope
layers = tf.contrib.layers
def center_initializer():
"""Centering Initializer for I3D.
This initializer allows identity mapping for temporal convolution at the
initialization, which is critical for a desired convergence behavior
for training a separable I3D model.
The centering behavior of this initializer requires an odd-sized kernel,
typically set to 3.
Returns:
A weight initializer op used in temporal convolutional layers.
Raises:
ValueError: Input tensor data type has to be tf.float32 or tf.bfloat16.
ValueError: If input tensor is not a 5-D tensor.
ValueError: If input and output channel dimensions are different.
ValueError: If spatial kernel sizes are not 1.
ValueError: If temporal kernel size is even.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None): # pylint: disable=unused-argument
"""Initializer op."""
if dtype != tf.float32 and dtype != tf.bfloat16:
raise ValueError(
'Input tensor data type has to be tf.float32 or tf.bfloat16.')
if len(shape) != 5:
raise ValueError('Input tensor has to be 5-D.')
if shape[3] != shape[4]:
raise ValueError('Input and output channel dimensions must be the same.')
if shape[1] != 1 or shape[2] != 1:
raise ValueError('Spatial kernel sizes must be 1 (pointwise conv).')
if shape[0] % 2 == 0:
raise ValueError('Temporal kernel size has to be odd.')
center_pos = int(shape[0] / 2)
init_mat = np.zeros(
[shape[0], shape[1], shape[2], shape[3], shape[4]], dtype=np.float32)
for i in range(0, shape[3]):
init_mat[center_pos, 0, 0, i, i] = 1.0
init_op = tf.constant(init_mat, dtype=dtype)
return init_op
return _initializer
@add_arg_scope
def conv3d_spatiotemporal(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
activation_fn=None,
normalizer_fn=None,
normalizer_params=None,
weights_regularizer=None,
separable=False,
data_format='NDHWC',
scope=''):
"""A wrapper for conv3d to model spatiotemporal representations.
This allows switching between original 3D convolution and separable 3D
convolutions for spatial and temporal features respectively. On Kinetics,
separable 3D convolutions yield better classification performance.
Args:
inputs: a 5-D tensor `[batch_size, depth, height, width, channels]`.
num_outputs: integer, the number of output filters.
kernel_size: a list of length 3
`[kernel_depth, kernel_height, kernel_width]` of the filters. Can be an
int if all values are the same.
stride: a list of length 3 `[stride_depth, stride_height, stride_width]`.
Can be an int if all strides are the same.
padding: one of `VALID` or `SAME`.
activation_fn: activation function.
normalizer_fn: normalization function to use instead of `biases`.
normalizer_params: dictionary of normalization function parameters.
weights_regularizer: Optional regularizer for the weights.
separable: If `True`, use separable spatiotemporal convolutions.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
scope: scope for `variable_scope`.
Returns:
A tensor representing the output of the (separable) conv3d operation.
"""
assert len(kernel_size) == 3
if separable and kernel_size[0] != 1:
spatial_kernel_size = [1, kernel_size[1], kernel_size[2]]
temporal_kernel_size = [kernel_size[0], 1, 1]
if isinstance(stride, list) and len(stride) == 3:
spatial_stride = [1, stride[1], stride[2]]
temporal_stride = [stride[0], 1, 1]
else:
spatial_stride = [1, stride, stride]
temporal_stride = [stride, 1, 1]
net = layers.conv3d(
inputs,
num_outputs,
spatial_kernel_size,
stride=spatial_stride,
padding=padding,
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_regularizer=weights_regularizer,
data_format=data_format,
scope=scope)
net = layers.conv3d(
net,
num_outputs,
temporal_kernel_size,
stride=temporal_stride,
padding=padding,
scope=scope + '/temporal',
activation_fn=activation_fn,
normalizer_fn=None,
data_format=data_format,
weights_initializer=center_initializer())
return net
else:
return layers.conv3d(
inputs,
num_outputs,
kernel_size,
stride=stride,
padding=padding,
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params,
weights_regularizer=weights_regularizer,
data_format=data_format,
scope=scope)
@add_arg_scope
def inception_block_v1_3d(inputs,
num_outputs_0_0a,
num_outputs_1_0a,
num_outputs_1_0b,
num_outputs_2_0a,
num_outputs_2_0b,
num_outputs_3_0b,
temporal_kernel_size=3,
self_gating_fn=None,
data_format='NDHWC',
scope=''):
"""A 3D Inception v1 block.
This allows use of separable 3D convolutions and self-gating, as
described in:
Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu and Kevin Murphy,
Rethinking Spatiotemporal Feature Learning For Video Understanding.
https://arxiv.org/abs/1712.04851.
Args:
inputs: a 5-D tensor `[batch_size, depth, height, width, channels]`.
num_outputs_0_0a: integer, the number of output filters for Branch 0,
operation Conv2d_0a_1x1.
num_outputs_1_0a: integer, the number of output filters for Branch 1,
operation Conv2d_0a_1x1.
num_outputs_1_0b: integer, the number of output filters for Branch 1,
operation Conv2d_0b_3x3.
num_outputs_2_0a: integer, the number of output filters for Branch 2,
operation Conv2d_0a_1x1.
num_outputs_2_0b: integer, the number of output filters for Branch 2,
operation Conv2d_0b_3x3.
num_outputs_3_0b: integer, the number of output filters for Branch 3,
operation Conv2d_0b_1x1.
temporal_kernel_size: integer, the size of the temporal convolutional
filters in the conv3d_spatiotemporal blocks.
self_gating_fn: function which optionally performs self-gating.
Must have two arguments, `inputs` and `scope`, and return one output
tensor the same size as `inputs`. If `None`, no self-gating is
applied.
data_format: An optional string from: "NDHWC", "NCDHW". Defaults to "NDHWC".
The data format of the input and output data. With the default format
"NDHWC", the data is stored in the order of: [batch, in_depth, in_height,
in_width, in_channels]. Alternatively, the format could be "NCDHW", the
data storage order is:
[batch, in_channels, in_depth, in_height, in_width].
scope: scope for `variable_scope`.
Returns:
A 5-D tensor `[batch_size, depth, height, width, out_channels]`, where
`out_channels = num_outputs_0_0a + num_outputs_1_0b + num_outputs_2_0b
+ num_outputs_3_0b`.
"""
use_gating = self_gating_fn is not None
with tf.variable_scope(scope):
with tf.variable_scope('Branch_0'):
branch_0 = layers.conv3d(
inputs, num_outputs_0_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
if use_gating:
branch_0 = self_gating_fn(branch_0, scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = layers.conv3d(
inputs, num_outputs_1_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
branch_1 = conv3d_spatiotemporal(
branch_1, num_outputs_1_0b, [temporal_kernel_size, 3, 3],
scope='Conv2d_0b_3x3')
if use_gating:
branch_1 = self_gating_fn(branch_1, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = layers.conv3d(
inputs, num_outputs_2_0a, [1, 1, 1], scope='Conv2d_0a_1x1')
branch_2 = conv3d_spatiotemporal(
branch_2, num_outputs_2_0b, [temporal_kernel_size, 3, 3],
scope='Conv2d_0b_3x3')
if use_gating:
branch_2 = self_gating_fn(branch_2, scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = layers.max_pool3d(inputs, [3, 3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv3d(
branch_3, num_outputs_3_0b, [1, 1, 1], scope='Conv2d_0b_1x1')
if use_gating:
branch_3 = self_gating_fn(branch_3, scope='Conv2d_0b_1x1')
index_c = data_format.index('C')
assert 1 <= index_c <= 4, 'Cannot identify channel dimension.'
output = tf.concat([branch_0, branch_1, branch_2, branch_3], index_c)
return output
def reduced_kernel_size_3d(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size
[batch_size, time, height, width, channels].
kernel_size: desired kernel size of length 3, corresponding to time,
height and width.
Returns:
a tensor with the kernel size.
"""
assert len(kernel_size) == 3
shape = input_tensor.get_shape().as_list()
assert len(shape) == 5
if None in shape[1:4]:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1]),
min(shape[3], kernel_size[2])]
return kernel_size_out
|
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs | configs | rfcn_resnet101_pets | # R-FCN with Resnet-101 (v1), configured for Oxford-IIIT Pets Dataset.
# Users should configure the fine_tune_checkpoint field in the train config as
# well as the label_map_path and input_path fields in the train_input_reader and
# eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that
# should be configured.
model {
faster_rcnn {
num_classes: 37
image_resizer {
keep_aspect_ratio_resizer {
min_dimension: 600
max_dimension: 1024
}
}
feature_extractor {
type: 'faster_rcnn_resnet101'
first_stage_features_stride: 16
}
first_stage_anchor_generator {
grid_anchor_generator {
scales: [0.25, 0.5, 1.0, 2.0]
aspect_ratios: [0.5, 1.0, 2.0]
height_stride: 16
width_stride: 16
}
}
first_stage_box_predictor_conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
first_stage_nms_score_threshold: 0.0
first_stage_nms_iou_threshold: 0.7
first_stage_max_proposals: 300
first_stage_localization_loss_weight: 2.0
first_stage_objectness_loss_weight: 1.0
second_stage_box_predictor {
rfcn_box_predictor {
conv_hyperparams {
op: CONV
regularizer {
l2_regularizer {
weight: 0.0
}
}
initializer {
truncated_normal_initializer {
stddev: 0.01
}
}
}
crop_height: 18
crop_width: 18
num_spatial_bins_height: 3
num_spatial_bins_width: 3
}
}
second_stage_post_processing {
batch_non_max_suppression {
score_threshold: 0.0
iou_threshold: 0.6
max_detections_per_class: 100
max_total_detections: 300
}
score_converter: SOFTMAX
}
second_stage_localization_loss_weight: 2.0
second_stage_classification_loss_weight: 1.0
}
}
train_config: {
batch_size: 1
optimizer {
momentum_optimizer: {
learning_rate: {
manual_step_learning_rate {
initial_learning_rate: 0.0003
schedule {
step: 900000
learning_rate: .00003
}
schedule {
step: 1200000
learning_rate: .000003
}
}
}
momentum_optimizer_value: 0.9
}
use_moving_average: false
}
gradient_clipping_by_norm: 10.0
fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt"
from_detection_checkpoint: true
load_all_detection_checkpoint_vars: true
# Note: The line below limits the training process to 200K steps, which we
# empirically found to be sufficient to train the pets dataset. This
# effectively bypasses the learning rate schedule (the learning rate will
# never decay). Remove the line below to train indefinitely.
num_steps: 200000
data_augmentation_options {
random_horizontal_flip {
}
}
}
train_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/pet_faces_train.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt"
}
eval_config: {
metrics_set: "coco_detection_metrics"
num_examples: 1101
}
eval_input_reader: {
tf_record_input_reader {
input_path: "PATH_TO_BE_CONFIGURED/pet_faces_val.record-?????-of-00010"
}
label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt"
shuffle: false
num_readers: 1
}
|
PyTorch/Segmentation/nnUNet | nnUNet | download | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from subprocess import call
from data_preprocessing.configs import task
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--task", type=str, required=True, help="Task to download")
parser.add_argument("--results", type=str, default="/data", help="Directory for data storage")
if __name__ == "__main__":
args = parser.parse_args()
tar_file = task[args.task] + ".tar"
file_path = os.path.join(args.results, tar_file)
call(f"aws s3 cp s3://msd-for-monai-eu/{tar_file} --no-sign-request {args.results}", shell=True)
call(f"tar -xf {file_path} -C {args.results}", shell=True)
call(f"rm -rf {file_path}", shell=True)
|
PyTorch/Translation/Transformer/scripts | scripts | run_DGX2_AMP | #! /bin/bash
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
nvidia-smi
RESULTS_DIR='/results'
CHECKPOINTS_DIR='/results/checkpoints'
mkdir -p $CHECKPOINTS_DIR
: ${SEED:=1}
: ${LR:=0.001}
: ${WARMUP:=4000}
: ${NUM_EPOCHS:=30}
: ${BS:=10240}
: ${NUM_GPU:=16}
STAT_FILE=${RESULTS_DIR}/DGX2_amp_${NUM_GPU}GPU.json
DISTRIBUTED="-m torch.distributed.run --nproc_per_node=${NUM_GPU}"
python ${DISTRIBUTED} /workspace/translation/train.py \
/data/ \
--arch transformer_wmt_en_de_big_t2t \
--share-all-embeddings \
--optimizer adam \
--adam-betas 0.9 0.997 \
--adam-eps 1e-9 \
--clip-norm 0.0 \
--lr-scheduler inverse_sqrt \
--warmup-init-lr 0.0 \
--warmup-updates ${WARMUP} \
--lr $LR \
--min-lr 0.0 \
--dropout 0.1 \
--weight-decay 0.0 \
--criterion label_smoothed_cross_entropy \
--label-smoothing 0.1 \
--max-tokens ${BS} \
--seed ${SEED} \
--max-epoch ${NUM_EPOCHS} \
--no-epoch-checkpoints \
--fuse-layer-norm \
--online-eval \
--log-interval 500 \
--save-dir ${RESULTS_DIR} \
--stat-file ${STAT_FILE} \
--amp
|
DGLPyTorch/DrugDiscovery/SE3Transformer/se3_transformer/data_loading | data_loading | __init__ | from .qm9 import QM9DataModule
|
PyTorch/Forecasting/TFT | TFT | criterions | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class QuantileLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.register_buffer('q', torch.tensor(config.quantiles))
def forward(self, predictions, targets):
diff = predictions - targets
ql = (1-self.q)*F.relu(diff) + self.q*F.relu(-diff)
losses = ql.view(-1, ql.shape[-1]).mean(0)
return losses
def qrisk(pred, tgt, quantiles):
diff = pred - tgt
ql = (1-quantiles)*np.clip(diff,0, float('inf')) + quantiles*np.clip(-diff,0, float('inf'))
losses = ql.reshape(-1, ql.shape[-1])
normalizer = np.abs(tgt).mean()
risk = 2 * losses / normalizer
return risk.mean(0)
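# A small, illustrative check (not part of the original module), assuming a config object
# that only needs a `quantiles` attribute: a prediction equal to the target gives zero
# quantile loss, and qrisk returns the normalized q-risk per quantile.
if __name__ == "__main__":
    from types import SimpleNamespace

    cfg = SimpleNamespace(quantiles=[0.1, 0.5, 0.9])
    criterion = QuantileLoss(cfg)
    targets = torch.ones(2, 4, 1)       # (batch, time, 1)
    predictions = torch.ones(2, 4, 3)   # one column per quantile
    print(criterion(predictions, targets))                                       # tensor([0., 0., 0.])
    print(qrisk(predictions.numpy(), targets.numpy(), np.array(cfg.quantiles)))  # [0. 0. 0.]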
|
PyTorch/LanguageModeling/Transformer-XL/pytorch/scripts/tests | tests | infer_bench | #!/bin/bash
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
REPO_DIR=${REPO_DIR:-"/workspace/transformer-xl/pytorch/"}
REFERENCE_FILE=$REPO_DIR/scripts/tests/reference_inference_throughput
MATH=$1
if [[ ${MATH} != "fp16" && ${MATH} != "fp32" ]]; then
echo "Unsupported option for MATH, use either 'fp16' or 'fp32'"
exit 1
fi
if [[ ${MATH} == 'fp16' ]]; then
MATH_OPT='--fp16'
elif [[ ${MATH} == 'fp32' ]]; then
MATH_OPT=''
fi
TYPE=$2
if [[ ${TYPE} != "pytorch" && ${TYPE} != "torchscript" ]]; then
echo "Unsupported option for TYPE, use either 'pytorch' or 'torchscript'"
exit 1
fi
PERF_TOLERANCE=0.9
BATCH_SIZE=16
GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader |uniq)
echo 'GPU_NAME:' "${GPU_NAME}"
GPU_COUNT=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader |wc -l)
echo 'GPU_COUNT:' "${GPU_COUNT}"
REFERENCE_PERF=$(grep "${MATH},${BATCH_SIZE},${TYPE},${GPU_NAME}" \
${REFERENCE_FILE} | \cut -f 5 -d ',')
if [ -z "${REFERENCE_PERF}" ]; then
echo "WARNING: COULD NOT FIND REFERENCE PERFORMANCE FOR EXECUTED CONFIG"
TARGET_PERF=''
else
PERF_THRESHOLD=$(awk 'BEGIN {print ('"${REFERENCE_PERF}"' * '"${PERF_TOLERANCE}"')}')
TARGET_PERF='--target_throughput '${PERF_THRESHOLD}
fi
cd $REPO_DIR
export CUDA_VISIBLE_DEVICES=0
bash run_wt103_base.sh eval 1 \
--model checkpoint/checkpoint_best.pt \
--target_perplexity 23.4 \
--batch_size "${BATCH_SIZE}" \
--type "${TYPE}" \
${MATH_OPT} \
${TARGET_PERF}
|
PyTorch/SpeechSynthesis/Tacotron2/scripts/docker | docker | build | #!/bin/bash
docker build . --rm -t tacotron2
|
TensorFlow2/Recommendation/WideAndDeep/triton/deployment_toolkit/triton_performance_runner/model_analyzer | model_analyzer | runner | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import shutil
import sys
from distutils.version import LooseVersion
from typing import List, Optional
import yaml
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...utils import log_dict, parse_server_url
from .model_analyzer import ModelAnalyzer, ModelAnalyzerMode
from .model_analyzer_config import ModelAnalyzerConfig
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version)
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version)
LOGGER = logging.getLogger("triton_performance_runner.model_analyzer")
class ModelAnalyzerRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
model_repository: str,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
timeout: Optional[int] = None,
verbose: bool = False,
):
log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"model_repository": model_repository,
"result_path": result_path,
"verbose": verbose,
},
)
if result_path.suffix:
raise ValueError(
"Results path for Model Analyzer is invalid. Please, provide the directory name. Example: results"
)
self._checkpoints = pathlib.Path("./checkpoints")
self._result_path = result_path
self._verbose = verbose
self._filename_model_inference = "metrics-model-inference.csv"
self._filename_model_gpu = "metrics-model-gpu.csv"
self._profile_config = self._prepare_profile_config(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
concurrency=concurrency,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
model_repository=model_repository,
output_shared_memory_size=output_shared_memory_size,
checkpoints=self._checkpoints,
verbose=verbose,
)
self._analyze_config = self._prepare_analyze_config(
model_name=model_name,
result_path=result_path,
verbose=verbose,
filename_model_inference=self._filename_model_inference,
filename_model_gpu=self._filename_model_gpu,
)
def run(self):
self._result_path.mkdir(parents=True, exist_ok=True)
if self._checkpoints.is_dir():
shutil.rmtree(self._checkpoints.as_posix())
self._checkpoints.mkdir(parents=True, exist_ok=True)
model_analyzer = ModelAnalyzer(config=self._profile_config)
model_analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=self._verbose)
for file in self._checkpoints.iterdir():
if not file.is_file() or file.suffix != ".ckpt":
continue
LOGGER.info(f"Moving checkpoint {file.name} to {self._result_path}")
shutil.move(file, self._result_path / file.name)
model_analyzer = ModelAnalyzer(config=self._analyze_config)
model_analyzer.run(mode=ModelAnalyzerMode.ANALYZE, verbose=self._verbose)
inference_metrics_file = pathlib.Path("/tmp") / "results" / self._filename_model_inference
gpu_metrics_file = pathlib.Path("/tmp") / "results" / self._filename_model_gpu
for file in [inference_metrics_file, gpu_metrics_file]:
LOGGER.info(f"Moving metrics {file.name} to {self._result_path}")
shutil.move(file, self._result_path / file.name)
def _prepare_profile_config(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
model_repository: str,
checkpoints: pathlib.Path,
output_shared_memory_size: int = 102400,
verbose: bool = False,
):
protocol, host, port = parse_server_url(server_url)
perf_analyzer_config = self._perf_analyzer_config(
input_data,
input_shapes,
measurement_mode,
measurement_interval,
measurement_request_count,
evaluation_mode,
offline_mode,
output_shared_memory_size,
)
config = {
"model_repository": model_repository,
"triton_launch_mode": "remote",
"run_config_search_disable": True,
"perf_analyzer_flags": perf_analyzer_config,
"perf_analyzer_timeout": 3600, # Workaround for Perf Analyzer timeout - use 1h
"profile_models": [model_name],
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"verbose": verbose,
"checkpoint_directory": checkpoints.as_posix(),
"override_output_model_repository": True,
"client_protocol": protocol.value,
f"triton_{protocol.value}_endpoint": f"{host}:{port}",
}
if verbose:
log_dict("Model Analyzer profiling configuration", config)
with open("config_profile.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
config["config-file"] = "config_profile.yaml"
return config
def _prepare_analyze_config(
self,
model_name: str,
result_path: pathlib.Path,
filename_model_inference: str,
filename_model_gpu: str,
verbose: bool,
):
inference_output_fields = [
"batch_size",
"concurrency",
"perf_throughput",
"perf_latency",
"perf_client_send_recv",
"perf_client_response_wait",
"perf_server_queue",
"perf_server_compute_input",
"perf_server_compute_infer",
"perf_server_compute_output",
]
gpu_output_fields = [
"gpu_uuid",
"batch_size",
"concurrency",
"gpu_used_memory",
"gpu_free_memory",
"gpu_utilization",
"gpu_power_usage",
]
config = {
"analysis_models": model_name,
"checkpoint_directory": result_path.as_posix(),
"export_path": "/tmp",
"inference_output_fields": inference_output_fields,
"gpu_output_fields": gpu_output_fields,
"filename_model_inference": filename_model_inference,
"filename_model_gpu": filename_model_gpu,
"summarize": False,
}
if verbose:
log_dict("Model Analyzer analysis configuration", config)
with open("config_analyze.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
config["config-file"] = "config_analyze.yaml"
return config
def _perf_analyzer_config(
self,
input_data: str,
input_shapes: List[str],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
output_shared_memory_size: int = 102400,
):
perf_analyzer_config = {
"measurement-interval": measurement_interval,
}
if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"):
perf_analyzer_config["input-data"] = [input_data]
else:
perf_analyzer_config["input-data"] = input_data
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
perf_analyzer_config["measurement-mode"] = measurement_mode.value
perf_analyzer_config["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
perf_analyzer_config["shared-memory"] = offline_mode.value
perf_analyzer_config["output-shared-memory-size"] = output_shared_memory_size
if input_shapes:
if TRITON_MODEL_ANALYZER_VERSION > LooseVersion("1.8.0"):
perf_analyzer_config["shape"] = input_shapes
else:
perf_analyzer_config["shape"] = input_shapes[0]
LOGGER.warning("Model Analyzer <= 1.8.0 support only single shape param for Perf Analyzer.")
return perf_analyzer_config
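# Illustrative shape of the perf_analyzer_flags dict built above for recent
# tritonclient / model-analyzer versions (a sketch only -- the values below are
# example inputs, not defaults taken from this module):
#
#     {
#         "measurement-interval": 5000,
#         "input-data": ["random"],
#         "measurement-mode": "count_windows",
#         "measurement-request-count": 50,
#         "shared-memory": "system",            # only set in offline evaluation mode
#         "output-shared-memory-size": 102400,
#         "shape": ["INPUT__0:1,128"],          # only set when input shapes are given
#     }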
|
PyTorch/Recommendation/NCF | NCF | full_test_suite | rm -r /data/cache/ml-20m
## Prepare the standard dataset:
./prepare_dataset.sh
## Prepare the modified dataset:
./test_dataset.sh
## Run on the modified dataset:
./test_cases.sh
## Check featurespec:
python test_featurespec_correctness.py /data/cache/ml-20m/feature_spec.yaml /data/ml-20m/feature_spec_template.yaml
## Prepare and run a 1-epoch training on the other dataset (ml-1m):
rm -r /data/cache/ml-1m
./prepare_dataset.sh ml-1m
python -m torch.distributed.launch --nproc_per_node=1 --use_env ncf.py --data /data/cache/ml-1m --epochs 1 |
PyTorch/SpeechSynthesis/Tacotron2/phrases | phrases | phrase_4_64 | She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
She sells seashells by the seashore, shells she sells are great
|
PyTorch/SpeechSynthesis/FastPitch | FastPitch | .gitignore | runs*/
LJSpeech-1.1/
output*
scripts_joc/
tests/
pretrained_models/
*.pyc
__pycache__
.idea/
.DS_Store
*.swp
*.swo
*.swn
|
PyTorch/SpeechSynthesis/Tacotron2/tacotron2_common | tacotron2_common | layers | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from librosa.filters import mel as librosa_mel_fn
from tacotron2_common.audio_processing import dynamic_range_compression, dynamic_range_decompression
from tacotron2_common.stft import STFT
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
return self.conv(signal)
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sr=sampling_rate,
n_fft=filter_length,
n_mels=n_mel_channels,
fmin=mel_fmin,
fmax=mel_fmax
)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
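# Minimal usage sketch (not part of the original module); it assumes a batch of
# waveforms already normalized to [-1, 1], as enforced by the asserts above:
#
#     taco_stft = TacotronSTFT(filter_length=1024, hop_length=256, win_length=1024,
#                              n_mel_channels=80, sampling_rate=22050)
#     audio = torch.clamp(torch.randn(2, 22050), -1.0, 1.0)   # (B, T) dummy batch
#     mel = taco_stft.mel_spectrogram(audio)                   # (B, 80, n_frames)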
|
TensorFlow2/LanguageModeling/BERT/official/utils/misc | misc | distribution_utils_test | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Tests for distribution util functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.misc import distribution_utils
class GetDistributionStrategyTest(tf.test.TestCase):
"""Tests for get_distribution_strategy."""
def test_one_device_strategy_cpu(self):
ds = distribution_utils.get_distribution_strategy(num_gpus=0)
self.assertEquals(ds.num_replicas_in_sync, 1)
self.assertEquals(len(ds.extended.worker_devices), 1)
self.assertIn('CPU', ds.extended.worker_devices[0])
def test_one_device_strategy_gpu(self):
ds = distribution_utils.get_distribution_strategy(num_gpus=1)
self.assertEquals(ds.num_replicas_in_sync, 1)
self.assertEquals(len(ds.extended.worker_devices), 1)
self.assertIn('GPU', ds.extended.worker_devices[0])
def test_mirrored_strategy(self):
ds = distribution_utils.get_distribution_strategy(num_gpus=5)
self.assertEquals(ds.num_replicas_in_sync, 5)
self.assertEquals(len(ds.extended.worker_devices), 5)
for device in ds.extended.worker_devices:
self.assertIn('GPU', device)
class PerReplicaBatchSizeTest(tf.test.TestCase):
"""Tests for per_replica_batch_size."""
def test_batch_size(self):
self.assertEquals(
distribution_utils.per_replica_batch_size(147, num_gpus=0), 147)
self.assertEquals(
distribution_utils.per_replica_batch_size(147, num_gpus=1), 147)
self.assertEquals(
distribution_utils.per_replica_batch_size(147, num_gpus=7), 21)
def test_batch_size_with_remainder(self):
with self.assertRaises(ValueError):
distribution_utils.per_replica_batch_size(147, num_gpus=5)
if __name__ == "__main__":
tf.test.main()
|
PyTorch/Detection/SSD/examples | examples | SSD300_FP16_1GPU | # This script launches SSD300 training in FP16 on 1 GPUs using 64 batch size
# Usage bash SSD300_FP16_1GPU.sh <path to this repository> <path to dataset> <additional flags>
python $1/main.py --backbone resnet50 --warmup 300 --bs 64 --data $2 ${@:3}
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data | data | README | # Setting Up Datasets
This file describes how to perform training on other datasets.
Currently, only the Pascal VOC dataset can be loaded from its original format and evaluated with Pascal-style results.
We expect annotations from other datasets to be converted to the COCO json format, and
the output will be COCO-style (i.e. AP, AP50, AP75, APs, APm, APl for bbox and segm).
## Creating Symlinks for PASCAL VOC
We assume that your symlinked `datasets/voc/VOC<year>` directory has the following structure:
```
VOC<year>
|_ JPEGImages
| |_ <im-1-name>.jpg
| |_ ...
| |_ <im-N-name>.jpg
|_ Annotations
| |_ pascal_train<year>.json (optional)
| |_ pascal_val<year>.json (optional)
| |_ pascal_test<year>.json (optional)
| |_ <im-1-name>.xml
| |_ ...
| |_ <im-N-name>.xml
|_ VOCdevkit<year>
```
Create symlinks for `voc/VOC<year>`:
```
cd ~/github/maskrcnn-benchmark
mkdir -p datasets/voc/VOC<year>
ln -s /path/to/VOC<year> datasets/voc/VOC<year>
```
Example configuration files for PASCAL VOC can be found [here](https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/configs/pascal_voc/).
### PASCAL VOC Annotations in COCO Format
To output COCO-style evaluation results, PASCAL VOC annotations in the COCO json format are required; they can be downloaded from [here](https://storage.googleapis.com/coco-dataset/external/PASCAL_VOC.zip)
via http://cocodataset.org/#external.
## Creating Symlinks for Cityscapes:
We assume that your symlinked `datasets/cityscapes` directory has the following structure:
```
cityscapes
|_ images
| |_ <im-1-name>.jpg
| |_ ...
| |_ <im-N-name>.jpg
|_ annotations
| |_ instanceonly_gtFile_train.json
| |_ ...
|_ raw
|_ gtFine
|_ ...
|_ README.md
```
Create symlinks for `cityscapes`:
```
cd ~/github/maskrcnn-benchmark
mkdir -p datasets/cityscapes
ln -s /path/to/cityscapes datasets/cityscapes
```
### Steps to convert Cityscapes Annotations to COCO Format
1. Download gtFine_trainvaltest.zip from https://www.cityscapes-dataset.com/downloads/ (login required)
2. Extract it to /path/to/gtFine_trainvaltest
```
gtFine_trainvaltest
|_ gtFine
```
3. Run the below commands to convert the annotations
```
cd ~/github
git clone https://github.com/mcordts/cityscapesScripts.git
cd cityscapesScripts
cp ~/github/maskrcnn-benchmark/tools/cityscapes/instances2dict_with_polygons.py cityscapesscripts/evaluation
python setup.py install
cd ~/github/maskrcnn-benchmark
python tools/cityscapes/convert_cityscapes_to_coco.py --datadir /path/to/gtFine_trainvaltest --outdir /path/to/cityscapes/annotations
```
Example configuration files for Cityscapes can be found [here](https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/configs/cityscapes/).
|
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/cli/commands | commands | synthesize | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from syngen.cli.commands.base_command import BaseCommand
from syngen.configuration.configuration import SynGenConfiguration
from syngen.synthesizer.configuration_graph_synthesizer import ConfigurationGraphSynthesizer
class SynthesizeCommand(BaseCommand):
def init_parser(self, base_parser):
synthesizer = base_parser.add_parser(
"synthesize",
help="Run Graph Synthesizer",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
synthesizer.set_defaults(action=self.run)
synthesizer.add_argument(
"-cp", "--config-path", type=str, default=None, help="Path to SynGen Configuration file"
)
synthesizer.add_argument(
"--timer-path", type=str, default=None,
help="Saves generation process timings to the specified file"
)
synthesizer.add_argument(
"-sp", "--save-path", type=str, default="./generated", required=False,
help="Save path to dump generated files",
)
synthesizer.add_argument(
"--cpu", action='store_true',
help="Runs all operations on CPU. [Attention] Alignment is not available on CPU"
)
synthesizer.add_argument(
"-v", "--verbose", action='store_true',
help="Displays generation process progress"
)
def run(self, args):
dict_args = vars(args)
config_path = dict_args.pop('config_path')
gpu = not dict_args.pop('cpu')
with open(config_path, 'r') as f:
configuration = json.load(f)
configuration = SynGenConfiguration(configuration)
synthesizer = ConfigurationGraphSynthesizer(
configuration,
gpu=gpu,
**dict_args,
)
synthesizer.fit()
synthesizer.generate(return_data=False)
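# Example invocation (assuming a `syngen` console entry point that registers this
# command; paths are placeholders):
#
#     syngen synthesize -cp /workspace/configs/example.json -sp ./generated --verbose
#
# This loads the JSON configuration, fits ConfigurationGraphSynthesizer on GPU
# (pass --cpu to stay on CPU; alignment is unavailable there), and writes the
# generated graph files under the --save-path directory.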
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/deployment/deployment_toolkit/triton_performance_runner/perf_analyzer | perf_analyzer | runner | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import logging
import os
import pathlib
import sys
from distutils.version import LooseVersion
from typing import Dict, List, Optional, Tuple
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...report import save_results, show_results, sort_results
from ...utils import log_dict, parse_server_url
from .perf_analyzer import PerfAnalyzer
from .perf_config import PerfAnalyzerConfig
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(
pkg_resources.get_distribution("tritonclient").version
)
LOGGER = logging.getLogger("triton_performance_runner.perf_analyzer")
class PerfAnalyzerRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: Dict[int, Tuple],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
timeout: Optional[int] = None,
verbose: bool = False,
flattened_input: bool = False,
):
log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"result_path": result_path,
"timeout": timeout,
"verbose": verbose,
},
)
if result_path.suffix != ".csv":
raise ValueError(
"Results path for Perf Analyzer is invalid. Please, provide the CSV file name. Example: results.csv"
)
self._server_url = server_url
self._model_name = model_name
self._input_data = input_data
self._batch_sizes = batch_sizes
self._concurrency = concurrency
self._measurement_mode = measurement_mode
self._measurement_interval = measurement_interval
self._measurement_request_count = measurement_request_count
self._evaluation_mode = evaluation_mode
self._offline_mode = offline_mode
self._result_path = result_path
self._output_shared_memory_size = output_shared_memory_size
self._timeout = timeout
self._verbose = verbose
self._protocol, self._host, self._port = parse_server_url(server_url)
self._flattened_input = flattened_input
def run(self):
results: List[Dict] = []
for batch_size in self._batch_sizes:
print("Measuring inference performance ")
input_data_filename, shapes = self._input_data[batch_size]
concurrency = 1
performance_partial_file = f"{self._evaluation_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv"
perf_analyzer_batch_size = 1 if self._flattened_input else batch_size
params = {
"model-name": self._model_name,
"model-version": 1,
"batch-size": perf_analyzer_batch_size,
"url": f"{self._host}:{self._port}",
"protocol": self._protocol.value,
"input-data": input_data_filename,
"measurement-interval": self._measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"latency-report-file": performance_partial_file,
}
if self._verbose:
params["extra-verbose"] = True
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = self._measurement_mode.value
params["measurement-request-count"] = self._measurement_request_count
if self._evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = self._offline_mode.value
params["output-shared-memory-size"] = self._output_shared_memory_size
if self._verbose:
log_dict(
f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}",
params,
)
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout)
perf_analyzer.run()
self._update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=self._result_path.as_posix(), data=results)
show_results(results=results)
def _calculate_average_latency(self, r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum(int(r.get(f, 0)) for f in avg_sum_fields)
return avg_latency
def _update_performance_data(
self, results: List, batch_size: int, performance_partial_file: str
):
row: Dict = {"Batch": batch_size}
with open(performance_partial_file) as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = self._calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
if self._flattened_input:
# correction necessary because "formally" this is run with batch_size=1
row["Inferences/Second"] = str(
float(row["Inferences/Second"]) * batch_size
)
results.append(row)
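# Illustrative merged row appended to `results` above (keys other than "Batch" and
# "avg latency" come from the perf_analyzer CSV header; values are made up, and
# fields absent from the CSV default to 0 in the latency sum):
#
#     {"Batch": 64, "Concurrency": "1", "Inferences/Second": "1234.5",
#      "Client Send": "15", "Server Queue": "32", "Server Compute Infer": "850",
#      "Client Recv": "10", "avg latency": 907}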
|