relative_path | section | filename | text
---|---|---|---|
PyTorch/SpeechSynthesis/FastPitch/fastpitch | fastpitch | transformer_jit | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from common.utils import mask_from_lens
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
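# inv_freq has shape [demb // 2]: the geometric frequency progression 1 / 10000^(2i / demb) used for the sinusoidal encoding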
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz: Optional[int] = None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)
if bsz is not None:
return pos_emb[None, :, :].expand(bsz, -1, -1)
else:
return pos_emb[None, :, :]
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
super(PositionwiseFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner), nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = self.CoreNet(inp)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class PositionwiseConvFF(nn.Module):
def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False):
super(PositionwiseConvFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)),
nn.ReLU(),
# nn.Dropout(dropout), # worse convergence
nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(self.layer_norm(core_out))
core_out = core_out.transpose(1, 2)
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(core_out)
core_out = core_out.transpose(1, 2)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / (d_head ** 0.5)
self.dropout = dropout
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inp, attn_mask: Optional[torch.Tensor] = None):
residual = inp
if self.pre_lnorm:
# layer normalization
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=-1)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
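# q, k, v: [bsz * n_head, seq_len, d_head]; the bmm below yields attn_score of shape [bsz * n_head, seq_len, seq_len]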
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask, -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(
inp.size(0), inp.size(1), n_head * d_head)
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
# residual connection
output = residual + attn_out
else:
# residual connection + layer normalization
# XXX Running TorchScript on 20.02 and 20.03 containers crashes here
# XXX Works well with 20.01-py3 container.
# XXX dirty fix is:
# XXX output = self.layer_norm(residual + attn_out).half()
output = self.layer_norm(residual + attn_out)
return output
class TransformerLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout,
**kwargs):
super(TransformerLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, mask):
output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2))
output *= mask
output = self.pos_ff(output)
output *= mask
return output
class FFTransformer(nn.Module):
def __init__(self, n_layer, n_head, d_model, d_head, d_inner, kernel_size,
dropout, dropatt, dropemb=0.0, embed_input=True,
n_embed=None, d_embed=None, padding_idx=0, pre_lnorm=False):
super(FFTransformer, self).__init__()
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.padding_idx = padding_idx
self.n_embed = n_embed
self.embed_input = embed_input
if embed_input:
self.word_emb = nn.Embedding(n_embed, d_embed or d_model,
padding_idx=self.padding_idx)
else:
self.word_emb = nn.Identity()
self.pos_emb = PositionalEmbedding(self.d_model)
self.drop = nn.Dropout(dropemb)
self.layers = nn.ModuleList()
for _ in range(n_layer):
self.layers.append(
TransformerLayer(
n_head, d_model, d_head, d_inner, kernel_size, dropout,
dropatt=dropatt, pre_lnorm=pre_lnorm)
)
def forward(self, dec_inp, seq_lens: Optional[torch.Tensor] = None,
conditioning: Optional[torch.Tensor] = None):
if not self.embed_input:
inp = dec_inp
assert seq_lens is not None
mask = mask_from_lens(seq_lens).unsqueeze(2)
else:
inp = self.word_emb(dec_inp)
# [bsz x L x 1]
mask = (dec_inp != self.padding_idx).unsqueeze(2)
pos_seq = torch.arange(inp.size(1), device=inp.device, dtype=inp.dtype)
pos_emb = self.pos_emb(pos_seq) * mask
if conditioning is not None:
out = self.drop(inp + pos_emb + conditioning)
else:
out = self.drop(inp + pos_emb)
for layer in self.layers:
out = layer(out, mask=mask)
# out = self.drop(out)
return out, mask
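# --- Illustrative usage sketch (not part of the original FastPitch module) ---
# Assumes the module's own imports (e.g. common.utils) resolve; the small
# hyperparameter values below are arbitrary assumptions, not FastPitch defaults.
if __name__ == '__main__':
    enc = FFTransformer(n_layer=2, n_head=1, d_model=64, d_head=64,
                        d_inner=256, kernel_size=3, dropout=0.1, dropatt=0.1,
                        embed_input=True, n_embed=100, padding_idx=0)
    tokens = torch.randint(1, 100, (2, 17))  # [bsz x L] token ids; 0 is the padding index
    out, mask = enc(tokens)
    print(out.shape, mask.shape)  # torch.Size([2, 17, 64]) torch.Size([2, 17, 1])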
|
PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit | deployment_toolkit | __init__ | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. |
TensorFlow2/LanguageModeling/BERT/scripts | scripts | run_pretraining_lamb_phase1 | #! /bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
echo "Container nvidia build = " $NVIDIA_BUILD_ID
train_batch_size_phase1=${1:-60}
train_batch_size_phase2=${2:-10}
eval_batch_size=${3:-8}
learning_rate_phase1=${4:-"7.5e-4"}
learning_rate_phase2=${5:-"5e-4"}
precision=${6:-"fp16"}
use_xla=${7:-"true"}
num_gpus=${8:-8}
warmup_steps_phase1=${9:-"2133"}
warmup_steps_phase2=${10:-"213"}
train_steps=${11:-8341}
save_checkpoints_steps=${12:-100}
num_accumulation_steps_phase1=${13:-128}
num_accumulation_steps_phase2=${14:-384}
bert_model=${15:-"large"}
DATA_DIR=${DATA_DIR:-data}
#Edit to save logs & checkpoints in a different directory
RESULTS_DIR=${RESULTS_DIR:-/results}
if [ "$bert_model" = "large" ] ; then
export BERT_CONFIG=data/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/bert_config.json
else
export BERT_CONFIG=data/download/google_pretrained_weights/uncased_L-12_H-768_A-12/bert_config.json
fi
PREC=""
if [ "$precision" = "fp16" ] ; then
PREC="--use_fp16"
elif [ "$precision" = "fp32" ] || [ "$precision" = "tf32" ] ; then
PREC=""
else
echo "Unknown <precision> argument"
exit -2
fi
if [ "$use_xla" = "true" ] ; then
PREC="$PREC --enable_xla"
echo "XLA activated"
fi
mpi=""
if [ $num_gpus -gt 1 ] ; then
mpi="mpiexec --allow-run-as-root -np $num_gpus"
horovod="--use_horovod"
fi
#PHASE 1
train_steps_phase1=$(expr $train_steps \* 9 \/ 10) #Phase 1 is 90% of training
gbs_phase1=$(expr $train_batch_size_phase1 \* $num_accumulation_steps_phase1)
seq_len=128
max_pred_per_seq=20
RESULTS_DIR_PHASE1=${RESULTS_DIR}/phase_1
mkdir -m 777 -p $RESULTS_DIR_PHASE1
INPUT_FILES="$DATA_DIR/tfrecord/lower_case_1_seq_len_${seq_len}_max_pred_${max_pred_per_seq}_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5_shard_1472_test_split_10/books_wiki_en_corpus/training/*"
EVAL_FILES="$DATA_DIR/tfrecord/lower_case_1_seq_len_${seq_len}_max_pred_${max_pred_per_seq}_masked_lm_prob_0.15_random_seed_12345_dupe_factor_5_shard_1472_test_split_10/books_wiki_en_corpus/test"
#Check if all necessary files are available before training
for DIR_or_file in $DATA_DIR $RESULTS_DIR_PHASE1 $BERT_CONFIG; do
if [ ! -d "$DIR_or_file" ] && [ ! -f "$DIR_or_file" ]; then
echo "Error! $DIR_or_file directory missing. Please mount correctly"
exit -1
fi
done
$mpi python /workspace/bert_tf2/run_pretraining.py \
--input_files=$INPUT_FILES \
--model_dir=$RESULTS_DIR_PHASE1 \
--bert_config_file=$BERT_CONFIG \
--train_batch_size=$train_batch_size_phase1 \
--max_seq_length=$seq_len \
--max_predictions_per_seq=$max_pred_per_seq \
--num_steps_per_epoch=$train_steps_phase1 --num_train_epochs=1 \
--steps_per_loop=$save_checkpoints_steps \
--save_checkpoint_steps=$save_checkpoints_steps \
--warmup_steps=$warmup_steps_phase1 \
--num_accumulation_steps=$num_accumulation_steps_phase1 \
--learning_rate=$learning_rate_phase1 \
--optimizer_type=LAMB \
$horovod $PREC
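# Example invocation (illustrative; the argument values simply restate the
# defaults above and are not tuned recommendations):
#   bash scripts/run_pretraining_lamb_phase1.sh 60 10 8 7.5e-4 5e-4 fp16 true 8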
|
TensorFlow2/LanguageModeling/ELECTRA/data | data | WikiDownloader | # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bz2
import os
import urllib.request
import subprocess
import sys
class WikiDownloader:
def __init__(self, language, save_path):
self.save_path = save_path + '/wikicorpus_' + language
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.language = language
self.download_urls = {
'en' : 'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-pages-articles.xml.bz2',
'zh' : 'https://dumps.wikimedia.org/zhwiki/latest/zhwiki-latest-pages-articles.xml.bz2'
}
self.output_files = {
'en' : 'wikicorpus_en.xml.bz2',
'zh' : 'wikicorpus_zh.xml.bz2'
}
def download(self):
if self.language in self.download_urls:
url = self.download_urls[self.language]
filename = self.output_files[self.language]
print('Downloading:', url)
if os.path.isfile(self.save_path + '/' + filename):
print('** Download file already exists, skipping download')
else:
response = urllib.request.urlopen(url)
with open(self.save_path + '/' + filename, "wb") as handle:
handle.write(response.read())
# Always attempt to unzip since this is relatively fast (note: bzip2 -dk keeps the archive but will not overwrite an existing extracted file)
print('Unzipping:', self.output_files[self.language])
subprocess.run('bzip2 -dk ' + self.save_path + '/' + filename, shell=True, check=True)
else:
assert False, 'WikiDownloader not implemented for this language yet.' |
PaddlePaddle/Classification/RN50v1.5/models | models | __init__ | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .resnet import ResNet50
|
PyTorch/Classification/GPUNet/triton/175ms/runner | runner | config_NVIDIA-DGX-1-(1x-V100-32GB) | batching: dynamic
checkpoints:
- name: 1.75ms
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_2_pyt_ckpt/versions/21.12.0_amp/zip
configurations:
- checkpoint: 1.75ms
parameters:
backend_accelerator: trt
checkpoint: 1.75ms
device_kind: gpu
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 64
number_of_model_instances: 2
precision: fp16
tensorrt_capture_cuda_graph: 0
torch_jit: none
container_version: '21.12'
datasets:
- name: imagenet
datasets_dir: datasets
ensemble_model_name: null
framework: PyTorch
measurement_steps_offline: 8
measurement_steps_online: 32
model_name: GPUnet
performance_tool: model_analyzer
triton_container_image: nvcr.io/nvidia/tritonserver:21.12-py3
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules | modules | layer_norm | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
TORCHSCRIPT = False
try:
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
has_fused_layernorm = True
class FusedLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
has_fused_layernorm = False
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
if torch.jit.is_scripting() or TORCHSCRIPT:
export = True
if not export and torch.cuda.is_available() and has_fused_layernorm:
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
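# --- Illustrative usage sketch (an assumption-laden example, not part of the
# original fairseq module). LayerNorm() picks Apex's FusedLayerNorm when Apex
# and CUDA are available and otherwise falls back to torch.nn.LayerNorm;
# Fp32LayerNorm always normalizes in float32 and casts back to the input dtype.
if __name__ == '__main__':
    ln = LayerNorm(512)
    x = torch.randn(4, 10, 512)
    print(type(ln).__name__, ln(x).shape)  # e.g. LayerNorm torch.Size([4, 10, 512])
    fp32_ln = Fp32LayerNorm(512)
    h = torch.randn(4, 10, 512, dtype=torch.float16)
    print(fp32_ln(h).dtype)  # torch.float16 (normalization computed in float32)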
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | target_assigner_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.target_assigner."""
import numpy as np
import tensorflow as tf
from object_detection.box_coders import keypoint_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_list
from object_detection.core import region_similarity_calculator
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as targetassigner
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.utils import test_case
class TargetAssignerTest(test_case.TestCase):
def test_assign_agnostic(self):
def graph_fn(anchor_means, groundtruth_box_corners):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [1]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_class_agnostic_with_ignored_matches(self):
# Note: this test is very similar to the one above. The third box matches with
# an IOU of 0.35, which falls between the matched and unmatched thresholds.
# This means that, as above, the expected classification targets are [1, 1, 0].
# Unlike above, the third match is ignored, so the expected classification
# weights are [1, 1, 0].
def graph_fn(anchor_means, groundtruth_box_corners):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.3)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9]], dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [0]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_agnostic_with_keypoints(self):
def graph_fn(anchor_means, groundtruth_box_corners,
groundtruth_keypoints):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0])
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
groundtruth_boxlist.add_field(fields.BoxListFields.keypoints,
groundtruth_keypoints)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.45, 0.45, 0.95, 0.95]],
dtype=np.float32)
groundtruth_keypoints = np.array(
[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]],
[[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [1]]
exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13,
-5],
[-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11,
-11, -7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means,
groundtruth_box_corners,
groundtruth_keypoints])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_class_agnostic_with_keypoints_and_ignored_matches(self):
# Note: this test mirrors test_assign_agnostic_with_keypoints above. Both use
# matched and unmatched thresholds of 0.5, so no match falls into an ignore
# range here; the expected classification targets are [1, 1, 0] and the
# expected classification weights are [1, 1, 1].
def graph_fn(anchor_means, groundtruth_box_corners,
groundtruth_keypoints):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = keypoint_box_coder.KeypointBoxCoder(
num_keypoints=6, scale_factors=[10.0, 10.0, 5.0, 5.0])
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
groundtruth_boxlist.add_field(fields.BoxListFields.keypoints,
groundtruth_keypoints)
result = target_assigner.assign(
anchors_boxlist, groundtruth_boxlist, unmatched_class_label=None)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, .9, 1.0]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.45, 0.45, 0.95, 0.95]],
dtype=np.float32)
groundtruth_keypoints = np.array(
[[[0.1, 0.2], [0.1, 0.3], [0.2, 0.2], [0.2, 0.2], [0.1, 0.1], [0.9, 0]],
[[0, 0.3], [0.2, 0.4], [0.5, 0.6], [0, 0.6], [0.8, 0.2], [0.2, 0.4]]],
dtype=np.float32)
exp_cls_targets = [[1], [1], [0]]
exp_cls_weights = [[1], [1], [1]]
exp_reg_targets = [[0, 0, 0, 0, -3, -1, -3, 1, -1, -1, -1, -1, -3, -3, 13,
-5],
[-1, -1, 0, 0, -15, -9, -11, -7, -5, -3, -15, -3, 1, -11,
-11, -7],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
exp_reg_weights = [1, 1, 0]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [anchor_means,
groundtruth_box_corners,
groundtruth_keypoints])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_multiclass(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
exp_cls_targets = [[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0]]
exp_cls_weights = [[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0],
[0, 0, -.5, .2]]
exp_reg_weights = [1, 1, 0, 1]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_multiclass_with_groundtruth_weights(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels,
groundtruth_weights):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label,
groundtruth_weights=groundtruth_weights)
(_, cls_weights, _, reg_weights, _) = result
return (cls_weights, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], dtype=np.float32)
groundtruth_weights = np.array([0.3, 0., 0.5], dtype=np.float32)
# background class gets weight of 1.
exp_cls_weights = [[0.3, 0.3, 0.3, 0.3, 0.3, 0.3, 0.3],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]]
exp_reg_weights = [0.3, 0., 0., 0.5] # background class gets weight of 0.
(cls_weights_out, reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_box_corners, groundtruth_labels,
groundtruth_weights
])
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_assign_multidimensional_class_targets(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([[0, 0], [0, 0]], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]], dtype=np.float32)
groundtruth_box_corners = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]], dtype=np.float32)
groundtruth_labels = np.array([[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 1], [1, .5]]], np.float32)
exp_cls_targets = [[[0, 1], [1, 0]],
[[1, 0], [0, 1]],
[[0, 0], [0, 0]],
[[0, 1], [1, .5]]]
exp_cls_weights = [[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]],
[[1, 1], [1, 1]]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, -1, 1],
[0, 0, 0, 0],
[0, 0, -.5, .2]]
exp_reg_weights = [1, 1, 0, 1]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_assign_empty_groundtruth(self):
def graph_fn(anchor_means, groundtruth_box_corners, groundtruth_labels):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
unmatched_class_label = tf.constant([0, 0, 0], tf.float32)
anchors_boxlist = box_list.BoxList(anchor_means)
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
result = target_assigner.assign(
anchors_boxlist,
groundtruth_boxlist,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
(cls_targets, cls_weights, reg_targets, reg_weights, _) = result
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
groundtruth_labels = np.zeros((0, 3), dtype=np.float32)
anchor_means = np.array([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]],
dtype=np.float32)
exp_cls_targets = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]
exp_cls_weights = [[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]
exp_reg_targets = [[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
exp_reg_weights = [0, 0, 0, 0]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, groundtruth_labels])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
self.assertEquals(cls_targets_out.dtype, np.float32)
self.assertEquals(cls_weights_out.dtype, np.float32)
self.assertEquals(reg_targets_out.dtype, np.float32)
self.assertEquals(reg_weights_out.dtype, np.float32)
def test_raises_error_on_incompatible_groundtruth_boxes_and_labels(self):
similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
unmatched_class_label = tf.constant([1, 0, 0, 0, 0, 0, 0], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 1.0, 0.8],
[0, 0.5, .5, 1.0],
[.75, 0, 1.0, .25]])
priors = box_list.BoxList(prior_means)
box_corners = [[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 0.5, 0.8],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]]
boxes = box_list.BoxList(tf.constant(box_corners))
groundtruth_labels = tf.constant([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]], tf.float32)
with self.assertRaisesRegexp(ValueError, 'Unequal shapes'):
target_assigner.assign(
priors,
boxes,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
def test_raises_error_on_invalid_groundtruth_labels(self):
similarity_calc = region_similarity_calculator.NegSqDistSimilarity()
matcher = bipartite_matcher.GreedyBipartiteMatcher()
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=1.0)
unmatched_class_label = tf.constant([[0, 0], [0, 0], [0, 0]], tf.float32)
target_assigner = targetassigner.TargetAssigner(
similarity_calc, matcher, box_coder)
prior_means = tf.constant([[0.0, 0.0, 0.5, 0.5]])
priors = box_list.BoxList(prior_means)
box_corners = [[0.0, 0.0, 0.5, 0.5],
[0.5, 0.5, 0.9, 0.9],
[.75, 0, .95, .27]]
boxes = box_list.BoxList(tf.constant(box_corners))
groundtruth_labels = tf.constant([[[0, 1], [1, 0]]], tf.float32)
with self.assertRaises(ValueError):
target_assigner.assign(
priors,
boxes,
groundtruth_labels,
unmatched_class_label=unmatched_class_label)
class BatchTargetAssignerTest(test_case.TestCase):
def _get_target_assigner(self):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder)
def test_batch_assign_targets(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [None, None]
anchors_boxlist = box_list.BoxList(anchor_means)
agnostic_target_assigner = self._get_target_assigner()
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
agnostic_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[1], [0], [0], [0]],
[[0], [1], [1], [0]]]
exp_cls_weights = [[[1], [1], [1], [1]],
[[1], [1], [1], [1]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multiclass_targets(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, unmatched_class_label)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multiclass_targets_with_padded_groundtruth(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
gt_weights = [groundtruth_weights1, groundtruth_weights2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, unmatched_class_label, gt_weights)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2],
[0., 0., 0., 0.]], dtype=np.float32)
groundtruth_weights1 = np.array([1, 0], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842],
[0, 0, 0, 0]],
dtype=np.float32)
groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 0, 0, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]],
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_multidimensional_targets(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_targets = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
target_dimensions = (2, 3)
unmatched_class_label = tf.constant(np.zeros(target_dimensions),
tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist, gt_box_batch,
gt_class_targets, unmatched_class_label)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=np.float32)
class_targets1 = np.array([[[0, 1, 1],
[1, 1, 0]]], dtype=np.float32)
class_targets2 = np.array([[[0, 1, 1],
[1, 1, 0]],
[[0, 0, 1],
[0, 0, 1]]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[[0., 1., 1.],
[1., 1., 0.]],
[[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.]]],
[[[0., 0., 0.],
[0., 0., 0.]],
[[0., 1., 1.],
[1., 1., 0.]],
[[0., 0., 1.],
[0., 0., 1.]],
[[0., 0., 0.],
[0., 0., 0.]]]]
exp_cls_weights = [[[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]]],
[[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]],
[[1., 1., 1.],
[1., 1., 1.]]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_empty_groundtruth(self):
def graph_fn(anchor_means, groundtruth_box_corners, gt_class_targets):
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
gt_box_batch = [groundtruth_boxlist]
gt_class_targets_batch = [gt_class_targets]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_targets(
multiclass_target_assigner, anchors_boxlist,
gt_box_batch, gt_class_targets_batch, unmatched_class_label)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 1, 1],
[1, 1, 1, 1]]]
exp_reg_targets = [[[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[0, 0]]
num_classes = 3
pad = 1
gt_class_targets = np.zeros((0, num_classes + pad), dtype=np.float32)
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_box_corners, gt_class_targets])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
class BatchTargetAssignConfidencesTest(test_case.TestCase):
def _get_target_assigner(self):
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
unmatched_threshold=0.5)
box_coder = mean_stddev_box_coder.MeanStddevBoxCoder(stddev=0.1)
return targetassigner.TargetAssigner(similarity_calc, matcher, box_coder)
def test_batch_assign_empty_groundtruth(self):
def graph_fn(anchor_means, groundtruth_box_corners, gt_class_confidences):
groundtruth_boxlist = box_list.BoxList(groundtruth_box_corners)
gt_box_batch = [groundtruth_boxlist]
gt_class_confidences_batch = [gt_class_confidences]
anchors_boxlist = box_list.BoxList(anchor_means)
num_classes = 3
implicit_class_weight = 0.5
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
multiclass_target_assigner = self._get_target_assigner()
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_box_corners = np.zeros((0, 4), dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1]], dtype=np.float32)
num_classes = 3
pad = 1
gt_class_confidences = np.zeros((0, num_classes + pad), dtype=np.float32)
exp_cls_targets = [[[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]]]
exp_reg_targets = [[[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[0, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn,
[anchor_means, groundtruth_box_corners, gt_class_confidences])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_agnostic(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [None, None]
anchors_boxlist = box_list.BoxList(anchor_means)
agnostic_target_assigner = self._get_target_assigner()
implicit_class_weight = 0.5
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
agnostic_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
include_background_class=False,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[1], [0], [0], [0]],
[[0], [1], [1], [0]]]
exp_cls_weights = [[[1], [0.5], [0.5], [0.5]],
[[0.5], [1], [1], [0.5]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0.15789001, -0.01500003, 0.57889998, -1.15799987],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 1, 0]]
(cls_targets_out,
cls_weights_out, reg_targets_out, reg_weights_out) = self.execute(
graph_fn, [anchor_means, groundtruth_boxlist1, groundtruth_boxlist2])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_multiclass(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
implicit_class_weight = 0.5
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, -1, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]],
[[0.5, 0.5, 0.5, 0.5],
[1, 0.5, 0.5, 1],
[0.5, 0.5, 1, 0.5],
[0.5, 0.5, 0.5, 0.5]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 0, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_multiclass_with_padded_groundtruth(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [class_targets1, class_targets2]
gt_weights = [groundtruth_weights1, groundtruth_weights2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
num_classes = 3
unmatched_class_label = tf.constant([1] + num_classes * [0], tf.float32)
implicit_class_weight = 0.5
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
gt_weights,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2],
[0., 0., 0., 0.]], dtype=np.float32)
groundtruth_weights1 = np.array([1, 0], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842],
[0, 0, 0, 0]],
dtype=np.float32)
groundtruth_weights2 = np.array([1, 1, 0], dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, -1, 0],
[0, 0, 0, 0]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
exp_cls_targets = [[[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0]],
[[1, 0, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[1, 0, 0, 0]]]
exp_cls_weights = [[[1, 1, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5]],
[[0.5, 0.5, 0.5, 0.5],
[1, 0.5, 0.5, 1],
[0.5, 0.5, 1, 0.5],
[0.5, 0.5, 0.5, 0.5]]]
exp_reg_targets = [[[0, 0, -0.5, -0.5],
[0, 0, 0, 0],
[0, 0, 0, 0,],
[0, 0, 0, 0,],],
[[0, 0, 0, 0,],
[0, 0.01231521, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]]
exp_reg_weights = [[1, 0, 0, 0],
[0, 1, 0, 0]]
(cls_targets_out, cls_weights_out, reg_targets_out,
reg_weights_out) = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2, groundtruth_weights1,
groundtruth_weights2
])
self.assertAllClose(cls_targets_out, exp_cls_targets)
self.assertAllClose(cls_weights_out, exp_cls_weights)
self.assertAllClose(reg_targets_out, exp_reg_targets)
self.assertAllClose(reg_weights_out, exp_reg_weights)
def test_batch_assign_confidences_multidimensional(self):
def graph_fn(anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2):
box_list1 = box_list.BoxList(groundtruth_boxlist1)
box_list2 = box_list.BoxList(groundtruth_boxlist2)
gt_box_batch = [box_list1, box_list2]
gt_class_confidences_batch = [class_targets1, class_targets2]
anchors_boxlist = box_list.BoxList(anchor_means)
multiclass_target_assigner = self._get_target_assigner()
target_dimensions = (2, 3)
unmatched_class_label = tf.constant(np.zeros(target_dimensions),
tf.float32)
implicit_class_weight = 0.5
(cls_targets, cls_weights, reg_targets, reg_weights,
_) = targetassigner.batch_assign_confidences(
multiclass_target_assigner,
anchors_boxlist,
gt_box_batch,
gt_class_confidences_batch,
unmatched_class_label=unmatched_class_label,
include_background_class=True,
implicit_class_weight=implicit_class_weight)
return (cls_targets, cls_weights, reg_targets, reg_weights)
groundtruth_boxlist1 = np.array([[0., 0., 0.2, 0.2]], dtype=np.float32)
groundtruth_boxlist2 = np.array([[0, 0.25123152, 1, 1],
[0.015789, 0.0985, 0.55789, 0.3842]],
dtype=np.float32)
class_targets1 = np.array([[0, 1, 0, 0]], dtype=np.float32)
class_targets2 = np.array([[0, 0, 0, 1],
[0, 0, 1, 0]], dtype=np.float32)
class_targets1 = np.array([[[0, 1, 1],
[1, 1, 0]]], dtype=np.float32)
class_targets2 = np.array([[[0, 1, 1],
[1, 1, 0]],
[[0, 0, 1],
[0, 0, 1]]], dtype=np.float32)
anchor_means = np.array([[0, 0, .25, .25],
[0, .25, 1, 1],
[0, .1, .5, .5],
[.75, .75, 1, 1]], dtype=np.float32)
with self.assertRaises(ValueError):
_, _, _, _ = self.execute(graph_fn, [
anchor_means, groundtruth_boxlist1, groundtruth_boxlist2,
class_targets1, class_targets2
])
class CreateTargetAssignerTest(tf.test.TestCase):
def test_create_target_assigner(self):
"""Tests that named constructor gives working target assigners.
TODO(rathodv): Make this test more general.
"""
corners = [[0.0, 0.0, 1.0, 1.0]]
groundtruth = box_list.BoxList(tf.constant(corners))
priors = box_list.BoxList(tf.constant(corners))
multibox_ta = (targetassigner
.create_target_assigner('Multibox', stage='proposal'))
multibox_ta.assign(priors, groundtruth)
# No tests on output, as that may vary arbitrarily as new target assigners
# are added. As long as it is constructed correctly and runs without errors,
# tests on the individual assigners cover correctness of the assignments.
anchors = box_list.BoxList(tf.constant(corners))
faster_rcnn_proposals_ta = (targetassigner
.create_target_assigner('FasterRCNN',
stage='proposal'))
faster_rcnn_proposals_ta.assign(anchors, groundtruth)
fast_rcnn_ta = (targetassigner
.create_target_assigner('FastRCNN'))
fast_rcnn_ta.assign(anchors, groundtruth)
faster_rcnn_detection_ta = (targetassigner
.create_target_assigner('FasterRCNN',
stage='detection'))
faster_rcnn_detection_ta.assign(anchors, groundtruth)
with self.assertRaises(ValueError):
targetassigner.create_target_assigner('InvalidDetector',
stage='invalid_stage')
if __name__ == '__main__':
tf.test.main()
|
PaddlePaddle/LanguageModeling/BERT | BERT | loss | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
class CrossEntropyLossForSQuAD(paddle.nn.Layer):
"""
Loss function for SQuAD
"""
def __init__(self):
super().__init__()
def forward(self, y, label):
start_logits, end_logits = y
start_position, end_position = label
start_position = paddle.unsqueeze(start_position, axis=-1)
end_position = paddle.unsqueeze(end_position, axis=-1)
start_loss = paddle.nn.functional.softmax_with_cross_entropy(
logits=start_logits, label=start_position, soft_label=False)
start_loss = paddle.mean(start_loss)
end_loss = paddle.nn.functional.softmax_with_cross_entropy(
logits=end_logits, label=end_position, soft_label=False)
end_loss = paddle.mean(end_loss)
loss = (start_loss + end_loss) / 2
return loss
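# Illustrative usage sketch (not part of the original module; shapes are assumptions):
#   criterion = CrossEntropyLossForSQuAD()
#   start_logits = paddle.rand([8, 384])                       # [batch_size, seq_len]
#   end_logits = paddle.rand([8, 384])
#   start_pos = paddle.randint(0, 384, [8], dtype='int64')
#   end_pos = paddle.randint(0, 384, [8], dtype='int64')
#   loss = criterion((start_logits, end_logits), (start_pos, end_pos))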
class BertPretrainingCriterion(paddle.nn.Layer):
"""
Loss function for BertPretraining.
Args:
vocab_size(int):
Vocabulary size of `inputs_ids` in `BertModel`.
"""
def __init__(self, vocab_size):
super().__init__()
self.loss_fn = paddle.nn.loss.CrossEntropyLoss(ignore_index=-1)
self.vocab_size = vocab_size
def forward(self, prediction_scores, seq_relationship_score,
masked_lm_labels, next_sentence_labels):
"""
Args:
prediction_scores(Tensor):
The scores of masked token prediction. Its data type should be float32.
If `masked_positions` is None, its shape is [batch_size, sequence_length, vocab_size].
Otherwise, its shape is [batch_size, mask_token_num, vocab_size]
seq_relationship_score(Tensor):
The scores of next sentence prediction. Its data type should be float32 and
its shape is [batch_size, 2]
masked_lm_labels(Tensor):
The labels of the masked language modeling task; its dimensionality matches `prediction_scores`.
Its data type should be int64. If `masked_positions` is None, its shape is [batch_size, sequence_length, 1].
Otherwise, its shape is [batch_size, mask_token_num, 1].
next_sentence_labels(Tensor):
The labels of the next sentence prediction task. Its data type should be int64 and
its shape is [batch_size, 1]; a [batch_size] tensor is also accepted and unsqueezed internally.
Returns:
Tensor: The pretraining loss, equal to the sum of `masked_lm_loss` and `next_sentence_loss`.
Its data type should be float32 and its shape is [1].
"""
with paddle.static.amp.fp16_guard():
masked_lm_labels_flat = masked_lm_labels.reshape([-1])
mlm_labels = masked_lm_labels_flat[masked_lm_labels_flat != -1]
masked_lm_loss = self.loss_fn(prediction_scores, mlm_labels)
if next_sentence_labels.ndim == 1:
next_sentence_labels = next_sentence_labels.unsqueeze(axis=-1)
next_sentence_loss = self.loss_fn(seq_relationship_score,
next_sentence_labels)
return masked_lm_loss + next_sentence_loss
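# Illustrative usage sketch (shapes are assumptions, not from the original file). Note that
# `prediction_scores` must already be restricted to the masked positions so it lines up with
# the -1-filtered `mlm_labels` gathered above:
#   criterion = BertPretrainingCriterion(vocab_size=30528)
#   # prediction_scores: [num_masked_tokens, vocab_size], seq_relationship_score: [batch_size, 2]
#   # masked_lm_labels: [batch_size, seq_len] with -1 at unmasked positions
#   # next_sentence_labels: [batch_size] or [batch_size, 1], dtype int64
#   total_loss = criterion(prediction_scores, seq_relationship_score,
#                          masked_lm_labels, next_sentence_labels)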
|
TensorFlow2/Recommendation/WideAndDeep/triton | triton | metrics | #!/usr/bin/env python3
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional
import numpy as np
import tensorflow as tf
from triton.deployment_toolkit.core import BaseMetricsCalculator
class MetricsCalculator(BaseMetricsCalculator):
def __init__(self, *, output_used_for_metrics: str):
self.output_used_for_metrics = output_used_for_metrics
self._ids = None
self._y_pred = None
self._y_real = None
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
y_real = y_real[self.output_used_for_metrics]
y_pred = y_pred[self.output_used_for_metrics]
def _concat_batches(b1, b2):
if b1 is None:
return b2
else:
return np.concatenate([b1, b2], axis=0)
self._ids = _concat_batches(self._ids, ids)
self._y_real = _concat_batches(self._y_real, y_real)
self._y_pred = _concat_batches(self._y_pred, y_pred)
@property
def metrics(self) -> Dict[str, Any]:
metrics = {"map12": self.get_map12(self._ids, self._y_pred, self._y_real)}
return metrics
def get_map12(self, ids, y_pred, y_real):
with tf.device("/cpu:0"):
predictions = tf.reshape(y_pred, [-1])
predictions = tf.cast(predictions, tf.float64)
display_ids = tf.reshape(ids, [-1])
labels = tf.reshape(y_real, [-1])
sorted_ids = tf.argsort(display_ids)
display_ids = tf.gather(display_ids, indices=sorted_ids)
predictions = tf.gather(predictions, indices=sorted_ids)
labels = tf.gather(labels, indices=sorted_ids)
_, display_ids_idx, display_ids_ads_count = tf.unique_with_counts(display_ids, out_idx=tf.int64)
pad_length = 30 - tf.reduce_max(display_ids_ads_count)
preds = tf.RaggedTensor.from_value_rowids(predictions, display_ids_idx).to_tensor()
labels = tf.RaggedTensor.from_value_rowids(labels, display_ids_idx).to_tensor()
labels_mask = tf.math.reduce_max(labels, 1)
preds_masked = tf.boolean_mask(preds, labels_mask)
labels_masked = tf.boolean_mask(labels, labels_mask)
labels_masked = tf.argmax(labels_masked, axis=1, output_type=tf.int32)
labels_masked = tf.reshape(labels_masked, [-1, 1])
preds_masked = tf.pad(preds_masked, [(0, 0), (0, pad_length)])
_, predictions_idx = tf.math.top_k(preds_masked, 12)
indices = tf.math.equal(predictions_idx, labels_masked)
indices_mask = tf.math.reduce_any(indices, 1)
masked_indices = tf.boolean_mask(indices, indices_mask)
res = tf.argmax(masked_indices, axis=1)
ap_matrix = tf.divide(1, tf.add(res, 1))
ap_sum = tf.reduce_sum(ap_matrix)
shape = tf.cast(tf.shape(indices)[0], tf.float64)
return (ap_sum / shape).numpy()
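# Worked example (illustrative, not from the original file): MAP@12 groups rows by display_id,
# ranks the ads in each impression by predicted score, and averages 1 / rank of the clicked ad
# over impressions with a positive label (contributing 0 if the clicked ad falls outside the top 12).
# E.g. a clicked ad ranked 1st in one impression and 3rd in another gives (1/1 + 1/3) / 2 = 2/3.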
|
CUDA-Optimized/FastSpeech/fastspeech/text_norm | text_norm | __init__ | # Copyright (c) 2017 Keith Ito
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" from https://github.com/keithito/tacotron """
import re
from . import cleaners
from .symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
# print(m)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
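# Illustrative usage sketch (the cleaner name is an assumption and must exist in cleaners.py):
#   ids = text_to_sequence("Turn left on {HH AW1 S S T AH0 N} Street.", ['english_cleaners'])
# Plain text is cleaned and mapped symbol-by-symbol, while the curly-brace span is encoded as
# ARPAbet symbols ('@HH', '@AW1', ...); sequence_to_text(ids) round-trips the result.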
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s != '_' and s != '~'
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | preprocessor_builder | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builder for preprocessing steps."""
import tensorflow as tf
from object_detection.core import preprocessor
from object_detection.protos import preprocessor_pb2
def _get_step_config_from_proto(preprocessor_step_config, step_name):
"""Returns the value of a field named step_name from proto.
Args:
preprocessor_step_config: A preprocessor_pb2.PreprocessingStep object.
step_name: Name of the field to get value from.
Returns:
result_dict: a sub proto message from preprocessor_step_config which will be
later converted to a dictionary.
Raises:
ValueError: If field does not exist in proto.
"""
for field, value in preprocessor_step_config.ListFields():
if field.name == step_name:
return value
raise ValueError('Could not get field %s from proto!' % step_name)
def _get_dict_from_proto(config):
"""Helper function to put all proto fields into a dictionary.
For many preprocessing steps, there's a trivial 1-1 mapping from proto fields
to function arguments. This function automatically populates a dictionary with
the arguments from the proto.
Protos that CANNOT be trivially populated include:
* nested messages.
* steps that check if an optional field is set (ie. where None != 0).
* protos that don't map 1-1 to arguments (ie. list should be reshaped).
* fields requiring additional validation (ie. repeated field has n elements).
Args:
config: A protobuf object that does not violate the conditions above.
Returns:
result_dict: |config| converted into a python dictionary.
"""
result_dict = {}
for field, value in config.ListFields():
result_dict[field.name] = value
return result_dict
# A map from a PreprocessingStep proto config field name to the preprocessing
# function that should be used. The PreprocessingStep proto should be parsable
# with _get_dict_from_proto.
PREPROCESSING_FUNCTION_MAP = {
'normalize_image':
preprocessor.normalize_image,
'random_pixel_value_scale':
preprocessor.random_pixel_value_scale,
'random_image_scale':
preprocessor.random_image_scale,
'random_rgb_to_gray':
preprocessor.random_rgb_to_gray,
'random_adjust_brightness':
preprocessor.random_adjust_brightness,
'random_adjust_contrast':
preprocessor.random_adjust_contrast,
'random_adjust_hue':
preprocessor.random_adjust_hue,
'random_adjust_saturation':
preprocessor.random_adjust_saturation,
'random_distort_color':
preprocessor.random_distort_color,
'random_jitter_boxes':
preprocessor.random_jitter_boxes,
'random_crop_to_aspect_ratio':
preprocessor.random_crop_to_aspect_ratio,
'random_black_patches':
preprocessor.random_black_patches,
'rgb_to_gray':
preprocessor.rgb_to_gray,
'scale_boxes_to_pixel_coordinates': (
preprocessor.scale_boxes_to_pixel_coordinates),
'subtract_channel_mean':
preprocessor.subtract_channel_mean,
'convert_class_logits_to_softmax':
preprocessor.convert_class_logits_to_softmax,
}
# A map to convert from preprocessor_pb2.ResizeImage.Method enum to
# tf.image.ResizeMethod.
RESIZE_METHOD_MAP = {
preprocessor_pb2.ResizeImage.AREA: tf.image.ResizeMethod.AREA,
preprocessor_pb2.ResizeImage.BICUBIC: tf.image.ResizeMethod.BICUBIC,
preprocessor_pb2.ResizeImage.BILINEAR: tf.image.ResizeMethod.BILINEAR,
preprocessor_pb2.ResizeImage.NEAREST_NEIGHBOR: (
tf.image.ResizeMethod.NEAREST_NEIGHBOR),
}
def build(preprocessor_step_config):
"""Builds preprocessing step based on the configuration.
Args:
preprocessor_step_config: PreprocessingStep configuration proto.
Returns:
function, argmap: A callable function and an argument map to call function
with.
Raises:
ValueError: On invalid configuration.
"""
step_type = preprocessor_step_config.WhichOneof('preprocessing_step')
if step_type in PREPROCESSING_FUNCTION_MAP:
preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type]
step_config = _get_step_config_from_proto(preprocessor_step_config,
step_type)
function_args = _get_dict_from_proto(step_config)
return (preprocessing_function, function_args)
if step_type == 'random_horizontal_flip':
config = preprocessor_step_config.random_horizontal_flip
return (preprocessor.random_horizontal_flip,
{
'keypoint_flip_permutation': tuple(
config.keypoint_flip_permutation),
})
if step_type == 'random_vertical_flip':
config = preprocessor_step_config.random_vertical_flip
return (preprocessor.random_vertical_flip,
{
'keypoint_flip_permutation': tuple(
config.keypoint_flip_permutation),
})
if step_type == 'random_rotation90':
return (preprocessor.random_rotation90, {})
if step_type == 'random_crop_image':
config = preprocessor_step_config.random_crop_image
return (preprocessor.random_crop_image,
{
'min_object_covered': config.min_object_covered,
'aspect_ratio_range': (config.min_aspect_ratio,
config.max_aspect_ratio),
'area_range': (config.min_area, config.max_area),
'overlap_thresh': config.overlap_thresh,
'clip_boxes': config.clip_boxes,
'random_coef': config.random_coef,
})
if step_type == 'random_pad_image':
config = preprocessor_step_config.random_pad_image
min_image_size = None
if (config.HasField('min_image_height') !=
config.HasField('min_image_width')):
raise ValueError('min_image_height and min_image_width should be either '
'both set or both unset.')
if config.HasField('min_image_height'):
min_image_size = (config.min_image_height, config.min_image_width)
max_image_size = None
if (config.HasField('max_image_height') !=
config.HasField('max_image_width')):
raise ValueError('max_image_height and max_image_width should be either '
'both set or both unset.')
if config.HasField('max_image_height'):
max_image_size = (config.max_image_height, config.max_image_width)
pad_color = config.pad_color or None
if pad_color:
if len(pad_color) == 3:
pad_color = tf.to_float([x for x in config.pad_color])
else:
raise ValueError('pad_color should have 3 elements (RGB) if set!')
return (preprocessor.random_pad_image,
{
'min_image_size': min_image_size,
'max_image_size': max_image_size,
'pad_color': pad_color,
})
if step_type == 'random_crop_pad_image':
config = preprocessor_step_config.random_crop_pad_image
min_padded_size_ratio = config.min_padded_size_ratio
if min_padded_size_ratio and len(min_padded_size_ratio) != 2:
raise ValueError('min_padded_size_ratio should have 2 elements if set!')
max_padded_size_ratio = config.max_padded_size_ratio
if max_padded_size_ratio and len(max_padded_size_ratio) != 2:
raise ValueError('max_padded_size_ratio should have 2 elements if set!')
pad_color = config.pad_color
if pad_color and len(pad_color) != 3:
raise ValueError('pad_color should have 3 elements if set!')
kwargs = {
'min_object_covered': config.min_object_covered,
'aspect_ratio_range': (config.min_aspect_ratio,
config.max_aspect_ratio),
'area_range': (config.min_area, config.max_area),
'overlap_thresh': config.overlap_thresh,
'clip_boxes': config.clip_boxes,
'random_coef': config.random_coef,
}
if min_padded_size_ratio:
kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
if max_padded_size_ratio:
kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
if pad_color:
kwargs['pad_color'] = tuple(pad_color)
return (preprocessor.random_crop_pad_image, kwargs)
if step_type == 'random_resize_method':
config = preprocessor_step_config.random_resize_method
return (preprocessor.random_resize_method,
{
'target_size': [config.target_height, config.target_width],
})
if step_type == 'resize_image':
config = preprocessor_step_config.resize_image
method = RESIZE_METHOD_MAP[config.method]
return (preprocessor.resize_image,
{
'new_height': config.new_height,
'new_width': config.new_width,
'method': method
})
if step_type == 'ssd_random_crop':
config = preprocessor_step_config.ssd_random_crop
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop,
{
'min_object_covered': min_object_covered,
'aspect_ratio_range': aspect_ratio_range,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
})
return (preprocessor.ssd_random_crop, {})
if step_type == 'ssd_random_crop_pad':
config = preprocessor_step_config.ssd_random_crop_pad
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
min_padded_size_ratio = [tuple(op.min_padded_size_ratio)
for op in config.operations]
max_padded_size_ratio = [tuple(op.max_padded_size_ratio)
for op in config.operations]
pad_color = [(op.pad_color_r, op.pad_color_g, op.pad_color_b)
for op in config.operations]
return (preprocessor.ssd_random_crop_pad,
{
'min_object_covered': min_object_covered,
'aspect_ratio_range': aspect_ratio_range,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
'min_padded_size_ratio': min_padded_size_ratio,
'max_padded_size_ratio': max_padded_size_ratio,
'pad_color': pad_color,
})
return (preprocessor.ssd_random_crop_pad, {})
if step_type == 'ssd_random_crop_fixed_aspect_ratio':
config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
clip_boxes = [op.clip_boxes for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop_fixed_aspect_ratio,
{
'min_object_covered': min_object_covered,
'aspect_ratio': config.aspect_ratio,
'area_range': area_range,
'overlap_thresh': overlap_thresh,
'clip_boxes': clip_boxes,
'random_coef': random_coef,
})
return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})
if step_type == 'ssd_random_crop_pad_fixed_aspect_ratio':
config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio
kwargs = {}
aspect_ratio = config.aspect_ratio
if aspect_ratio:
kwargs['aspect_ratio'] = aspect_ratio
min_padded_size_ratio = config.min_padded_size_ratio
if min_padded_size_ratio:
if len(min_padded_size_ratio) != 2:
raise ValueError('min_padded_size_ratio should have 2 elements if set!')
kwargs['min_padded_size_ratio'] = tuple(min_padded_size_ratio)
max_padded_size_ratio = config.max_padded_size_ratio
if max_padded_size_ratio:
if len(max_padded_size_ratio) != 2:
raise ValueError('max_padded_size_ratio should have 2 elements if set!')
kwargs['max_padded_size_ratio'] = tuple(max_padded_size_ratio)
if config.operations:
kwargs['min_object_covered'] = [op.min_object_covered
for op in config.operations]
kwargs['aspect_ratio_range'] = [(op.min_aspect_ratio, op.max_aspect_ratio)
for op in config.operations]
kwargs['area_range'] = [(op.min_area, op.max_area)
for op in config.operations]
kwargs['overlap_thresh'] = [op.overlap_thresh for op in config.operations]
kwargs['clip_boxes'] = [op.clip_boxes for op in config.operations]
kwargs['random_coef'] = [op.random_coef for op in config.operations]
return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, kwargs)
raise ValueError('Unknown preprocessing step.')
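# Illustrative usage sketch (field values are assumptions, not from the original file):
#   step = preprocessor_pb2.PreprocessingStep()
#   step.random_adjust_brightness.max_delta = 0.2
#   fn, args = build(step)
#   # fn is preprocessor.random_adjust_brightness and args == {'max_delta': 0.2}, resolved via
#   # PREPROCESSING_FUNCTION_MAP and _get_dict_from_proto above.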
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/tests/feature_specs | feature_specs | 13_num_26_cat | channel_spec:
categorical:
- cat_0.bin
- cat_1.bin
- cat_2.bin
- cat_3.bin
- cat_4.bin
- cat_5.bin
- cat_6.bin
- cat_7.bin
- cat_8.bin
- cat_9.bin
- cat_10.bin
- cat_11.bin
- cat_12.bin
- cat_13.bin
- cat_14.bin
- cat_15.bin
- cat_16.bin
- cat_17.bin
- cat_18.bin
- cat_19.bin
- cat_20.bin
- cat_21.bin
- cat_22.bin
- cat_23.bin
- cat_24.bin
- cat_25.bin
label:
- label
numerical: &id001
- num_0
- num_1
- num_2
- num_3
- num_4
- num_5
- num_6
- num_7
- num_8
- num_9
- num_10
- num_11
- num_12
feature_spec:
cat_0.bin:
cardinality: 100000
dtype: int32
cat_1.bin:
cardinality: 100001
dtype: int32
cat_10.bin:
cardinality: 100010
dtype: int32
cat_11.bin:
cardinality: 100011
dtype: int32
cat_12.bin:
cardinality: 100012
dtype: int32
cat_13.bin:
cardinality: 100013
dtype: int32
cat_14.bin:
cardinality: 100014
dtype: int32
cat_15.bin:
cardinality: 100015
dtype: int32
cat_16.bin:
cardinality: 100016
dtype: int32
cat_17.bin:
cardinality: 100017
dtype: int32
cat_18.bin:
cardinality: 100018
dtype: int32
cat_19.bin:
cardinality: 100019
dtype: int32
cat_2.bin:
cardinality: 100002
dtype: int32
cat_20.bin:
cardinality: 100020
dtype: int32
cat_21.bin:
cardinality: 100021
dtype: int32
cat_22.bin:
cardinality: 100022
dtype: int32
cat_23.bin:
cardinality: 100023
dtype: int32
cat_24.bin:
cardinality: 100024
dtype: int32
cat_25.bin:
cardinality: 100025
dtype: int32
cat_3.bin:
cardinality: 100003
dtype: int32
cat_4.bin:
cardinality: 100004
dtype: int32
cat_5.bin:
cardinality: 100005
dtype: int32
cat_6.bin:
cardinality: 100006
dtype: int32
cat_7.bin:
cardinality: 100007
dtype: int32
cat_8.bin:
cardinality: 100008
dtype: int32
cat_9.bin:
cardinality: 100009
dtype: int32
label:
dtype: bool
num_0:
dtype: float16
num_1:
dtype: float16
num_10:
dtype: float16
num_11:
dtype: float16
num_12:
dtype: float16
num_2:
dtype: float16
num_3:
dtype: float16
num_4:
dtype: float16
num_5:
dtype: float16
num_6:
dtype: float16
num_7:
dtype: float16
num_8:
dtype: float16
num_9:
dtype: float16
metadata: {}
source_spec:
test:
- features: *id001
files:
- test/numerical.bin
type: split_binary
- features:
- label
files:
- test/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- test/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- test/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- test/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- test/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- test/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- test/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- test/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- test/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- test/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- test/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- test/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- test/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- test/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- test/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- test/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- test/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- test/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- test/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- test/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- test/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- test/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- test/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- test/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- test/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- test/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- test/cat_25.bin
type: split_binary
train:
- features: *id001
files:
- train/numerical.bin
type: split_binary
- features:
- label
files:
- train/label.bin
type: split_binary
- features:
- cat_0.bin
files:
- train/cat_0.bin
type: split_binary
- features:
- cat_1.bin
files:
- train/cat_1.bin
type: split_binary
- features:
- cat_2.bin
files:
- train/cat_2.bin
type: split_binary
- features:
- cat_3.bin
files:
- train/cat_3.bin
type: split_binary
- features:
- cat_4.bin
files:
- train/cat_4.bin
type: split_binary
- features:
- cat_5.bin
files:
- train/cat_5.bin
type: split_binary
- features:
- cat_6.bin
files:
- train/cat_6.bin
type: split_binary
- features:
- cat_7.bin
files:
- train/cat_7.bin
type: split_binary
- features:
- cat_8.bin
files:
- train/cat_8.bin
type: split_binary
- features:
- cat_9.bin
files:
- train/cat_9.bin
type: split_binary
- features:
- cat_10.bin
files:
- train/cat_10.bin
type: split_binary
- features:
- cat_11.bin
files:
- train/cat_11.bin
type: split_binary
- features:
- cat_12.bin
files:
- train/cat_12.bin
type: split_binary
- features:
- cat_13.bin
files:
- train/cat_13.bin
type: split_binary
- features:
- cat_14.bin
files:
- train/cat_14.bin
type: split_binary
- features:
- cat_15.bin
files:
- train/cat_15.bin
type: split_binary
- features:
- cat_16.bin
files:
- train/cat_16.bin
type: split_binary
- features:
- cat_17.bin
files:
- train/cat_17.bin
type: split_binary
- features:
- cat_18.bin
files:
- train/cat_18.bin
type: split_binary
- features:
- cat_19.bin
files:
- train/cat_19.bin
type: split_binary
- features:
- cat_20.bin
files:
- train/cat_20.bin
type: split_binary
- features:
- cat_21.bin
files:
- train/cat_21.bin
type: split_binary
- features:
- cat_22.bin
files:
- train/cat_22.bin
type: split_binary
- features:
- cat_23.bin
files:
- train/cat_23.bin
type: split_binary
- features:
- cat_24.bin
files:
- train/cat_24.bin
type: split_binary
- features:
- cat_25.bin
files:
- train/cat_25.bin
type: split_binary
|
TensorFlow/LanguageModeling/Transformer-XL/tf | tf | vocabulary | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter, OrderedDict
import numpy as np
import tensorflow as tf
from tensorflow.gfile import Open as open
from tensorflow.gfile import Exists as exists
class Vocab(object):
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=True,
delimiter=None, vocab_file=None):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == '':
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ['<S>'] + symbols + ['<S>']
elif add_eos:
return symbols + ['<eos>']
else:
return symbols
def count_file(self, path, verbose=False, add_eos=False):
if verbose: print('counting file {} ...'.format(path))
assert exists(path)
sents = []
with open(path, 'r') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose: print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
self.unk_idx = self.sym2idx['<UNK>']
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq: break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose: print('encoding file {} ...'.format(path))
assert exists(path)
encoded = []
with open(path, 'r') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_nparray(symbols))
if ordered:
encoded = np.concatenate(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose: print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_nparray(symbols))
if ordered:
encoded = np.concatenate(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
assert hasattr(self, 'unk_idx')
return self.sym2idx.get(sym, self.unk_idx)
def get_symbols(self, indices):
return [self.get_sym(idx) for idx in indices]
def get_indices(self, symbols):
return [self.get_idx(sym) for sym in symbols]
def convert_to_nparray(self, symbols):
nparray = np.array(self.get_indices(symbols), dtype=np.int64)
return nparray
def convert_to_sent(self, indices, exclude=None):
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
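# Illustrative usage sketch with toy data (not part of the original file):
#   vocab = Vocab(special=['<eos>'], lower_case=True)
#   sents = [['hello', 'world'], ['hello', 'again']]
#   vocab.count_sents(sents)
#   vocab.build_vocab()                            # '<eos>' first, then symbols by frequency
#   ids = vocab.encode_sents(sents, ordered=True)  # concatenated np.int64 array of symbol ids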
|
PyTorch/SpeechSynthesis/FastPitch/hifigan | hifigan | models_ch_last_ | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d, ConvTranspose2d, AvgPool2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from common.utils import init_weights, get_padding, print_once
LRELU_SLOPE = 0.1
class ResBlock1(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.h = h
self.convs1 = nn.ModuleList([
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=(dilation[0], 1),
padding=(get_padding(kernel_size, dilation[0]), 0))),
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=(dilation[1], 1),
padding=(get_padding(kernel_size, dilation[1]), 0))),
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=(dilation[2], 1),
padding=(get_padding(kernel_size, dilation[2]), 0)))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=1,
padding=(get_padding(kernel_size, 1), 0))),
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=1,
padding=(get_padding(kernel_size, 1), 0))),
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=1,
padding=(get_padding(kernel_size, 1), 0)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.h = h
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(torch.nn.Module):
def __init__(self, h):
super(Generator, self).__init__()
self.h = h
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
self.conv_pre = weight_norm(Conv2d(80, h.upsample_initial_channel, (7,1), (1,1), padding=(3,0)))
assert h.resblock == '1', 'Only ResBlock1 currently supported for NHWC'
resblock = ResBlock1 if h.resblock == '1' else ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(weight_norm(
# ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
# k, u, padding=(k-u)//2)))
ConvTranspose2d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
(k, 1), (u, 1), padding=((k-u)//2, 0))))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(resblock(h, ch, k, d))
self.conv_post = weight_norm(Conv2d(ch, 1, (7,1), (1,1), padding=(3,0)))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
x = x.unsqueeze(-1).to(memory_format=torch.channels_last)
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
# x = self.ups[i](x.unsqueeze(-1)).squeeze(-1)
x = self.ups[i](x)
xs = 0
for j in range(self.num_kernels):
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
x = x.squeeze(-1)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class DiscriminatorP(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.period = period
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t, unit = x.shape
assert unit == 1
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, 0, 0, n_pad), "reflect")
t = t + n_pad
# print_once('x pre channels last:', x.is_contiguous(memory_format=torch.channels_last))
x = x.view(b, c, t // self.period, self.period)
# print_once('x post channels last:', x.is_contiguous(memory_format=torch.channels_last))
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
# x = torch.flatten(x, 1, -1)
return x, fmap
def share_params_of(self, dp):
assert len(self.convs) == len(dp.convs)
for c1, c2 in zip(self.convs, dp.convs):
c1.weight = c2.weight
c1.bias = c2.bias
class DiscriminatorPConv1d(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorPConv1d, self).__init__()
self.period = period
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0), dilation=(period, 1))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0), dilation=(period, 1))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0), dilation=(period, 1))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0), dilation=(period, 1))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0), dilation=(period, 1))),
])
# self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1, dilation=period))
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0), dilation=(period, 1)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t, unit = x.shape
assert unit == 1
# if t % self.period != 0: # pad first
# n_pad = self.period - (t % self.period)
# x = F.pad(x, (0, n_pad), "reflect")
# t = t + n_pad
# x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
def share_params_of(self, dp):
assert len(self.convs) == len(dp.convs)
for c1, c2 in zip(self.convs, dp.convs):
c1.weight = c2.weight
c1.bias = c2.bias
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self, periods, use_conv1d=False, shared=False):
super(MultiPeriodDiscriminator, self).__init__()
print('MPD PERIODS:', periods)
if use_conv1d:
print('Constructing dilated MPD')
layers = [DiscriminatorPConv1d(p) for p in periods]
else:
layers = [DiscriminatorP(p) for p in periods]
if shared:
print('MPD HAS SHARED PARAMS')
for l in layers[1:]:
l.share_params_of(layers[0])
self.discriminators = nn.ModuleList(layers)
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(torch.nn.Module):
def __init__(self, use_spectral_norm=False, amp_groups=False):
super(DiscriminatorS, self).__init__()
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
# self.convs = nn.ModuleList([
# norm_f(Conv1d(1, 128, 15, 1, padding=7)),
# norm_f(Conv1d(128, 128, 41, 2, groups=1 if amp_groups else 4, padding=20)), # was: groups=4
# norm_f(Conv1d(128, 256, 41, 2, groups=1 if amp_groups else 16, padding=20)), # was: groups=16
# norm_f(Conv1d(256, 512, 41, 4, groups=1 if amp_groups else 16, padding=20)), # was: groups=16
# norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
# norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
# norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
# ])
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 128, (15,1), (1,1), padding=(7 , 0))),
norm_f(Conv2d(128, 128, (41,1), (2,1), groups=1 if amp_groups else 4, padding=(20, 0))), # was: groups=4
norm_f(Conv2d(128, 256, (41,1), (2,1), groups=1 if amp_groups else 16, padding=(20, 0))), # was: groups=16
norm_f(Conv2d(256, 512, (41,1), (4,1), groups=1 if amp_groups else 16, padding=(20, 0))), # was: groups=16
norm_f(Conv2d(512, 1024, (41,1), (4,1), groups=16 , padding=(20, 0))),
norm_f(Conv2d(1024, 1024, (41,1), (1,1), groups=16 , padding=(20, 0))),
norm_f(Conv2d(1024, 1024, ( 5,1), (1,1), padding=(2 , 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3,1), (1,1), padding=(1,0)))
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
# x = x.squeeze(-1)
# x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(torch.nn.Module):
def __init__(self, amp_groups=False):
super(MultiScaleDiscriminator, self).__init__()
if amp_groups:
print('MSD: AMP groups')
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True, amp_groups=amp_groups),
DiscriminatorS(amp_groups=amp_groups),
DiscriminatorS(amp_groups=amp_groups),
])
self.meanpools = nn.ModuleList([
AvgPool2d((4, 1), (2, 1), padding=(1, 0)),
AvgPool2d((4, 1), (2, 1), padding=(1, 0))
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def feature_loss(fmap_r, fmap_g, keys=[]):
loss = 0
meta = {}
assert len(keys) == len(fmap_r)
for key, dr, dg in zip(keys, fmap_r, fmap_g):
k = 'loss_gen_feat_' + key
meta[k] = 0
for rl, gl in zip(dr, dg):
# loss += torch.mean(torch.abs(rl - gl))
diff = torch.mean(torch.abs(rl - gl))
loss += diff
meta[k] += diff.item()
return loss*2, meta
def discriminator_loss(disc_real_outputs, disc_generated_outputs, keys=[]):
loss = 0
r_losses = []
g_losses = []
meta = {}
assert len(keys) == len(disc_real_outputs)
for key, dr, dg in zip(keys, disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
meta['loss_disc_real_' + key] = r_loss.item()
meta['loss_disc_gen_' + key] = g_loss.item()
return loss, r_losses, g_losses, meta
def generator_loss(disc_outputs, keys=[]):
loss = 0
gen_losses = []
meta = {}
assert len(keys) == len(disc_outputs)
for key, dg in zip(keys, disc_outputs):
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
meta['loss_gen_' + key] = l.item()
return loss, gen_losses, meta
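# Illustrative usage sketch (the period list and the [batch, 1, time, 1] channels-last waveform
# layout are assumptions for this variant, not taken from the original file):
#   periods = [2, 3, 5, 7, 11]
#   mpd = MultiPeriodDiscriminator(periods)
#   y_d_rs, y_d_gs, fmap_rs, fmap_gs = mpd(y, y_hat.detach())
#   keys = ['p' + str(p) for p in periods]
#   loss_disc, _, _, meta = discriminator_loss(y_d_rs, y_d_gs, keys=keys)
#   # The generator side pairs generator_loss(y_d_gs, keys=keys) with
#   # feature_loss(fmap_rs, fmap_gs, keys=keys).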
|
PyTorch/SpeechSynthesis/HiFiGAN/common/text | text | text_processing | """ adapted from https://github.com/keithito/tacotron """
import re
import numpy as np
from . import cleaners
from .symbols import get_symbols
from . import cmudict
from .numerical import _currency_re, _expand_currency
#########
# REGEX #
#########
# Regular expression matching text enclosed in curly braces for encoding
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
# Regular expression matching words and not words
_words_re = re.compile(r"([a-zA-ZÀ-ž]+['][a-zA-ZÀ-ž]{1,2}|[a-zA-ZÀ-ž]+)|([{][^}]+[}]|[^a-zA-ZÀ-ž{}]+)")
# Regular expression separating words enclosed in curly braces for cleaning
_arpa_re = re.compile(r'{[^}]+}|\S+')
class TextProcessing(object):
def __init__(self, symbol_set, cleaner_names, p_arpabet=0.0,
handle_arpabet='word', handle_arpabet_ambiguous='ignore',
expand_currency=True):
self.symbols = get_symbols(symbol_set)
self.cleaner_names = cleaner_names
# Mappings from symbol to numeric ID and vice versa:
self.symbol_to_id = {s: i for i, s in enumerate(self.symbols)}
self.id_to_symbol = {i: s for i, s in enumerate(self.symbols)}
self.expand_currency = expand_currency
# cmudict
self.p_arpabet = p_arpabet
self.handle_arpabet = handle_arpabet
self.handle_arpabet_ambiguous = handle_arpabet_ambiguous
def text_to_sequence(self, text):
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += self.symbols_to_sequence(text)
break
sequence += self.symbols_to_sequence(m.group(1))
sequence += self.arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(self, sequence):
result = ''
for symbol_id in sequence:
if symbol_id in self.id_to_symbol:
s = self.id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def clean_text(self, text):
for name in self.cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def symbols_to_sequence(self, symbols):
return [self.symbol_to_id[s] for s in symbols if s in self.symbol_to_id]
def arpabet_to_sequence(self, text):
return self.symbols_to_sequence(['@' + s for s in text.split()])
def get_arpabet(self, word):
arpabet_suffix = ''
if word.lower() in cmudict.heteronyms:
return word
if len(word) > 2 and word.endswith("'s"):
arpabet = cmudict.lookup(word)
if arpabet is None:
arpabet = self.get_arpabet(word[:-2])
arpabet_suffix = ' Z'
elif len(word) > 1 and word.endswith("s"):
arpabet = cmudict.lookup(word)
if arpabet is None:
arpabet = self.get_arpabet(word[:-1])
arpabet_suffix = ' Z'
else:
arpabet = cmudict.lookup(word)
if arpabet is None:
return word
elif arpabet[0] == '{':
arpabet = [arpabet[1:-1]]
# XXX arpabet might not be a list here
if type(arpabet) is not list:
return word
if len(arpabet) > 1:
if self.handle_arpabet_ambiguous == 'first':
arpabet = arpabet[0]
elif self.handle_arpabet_ambiguous == 'random':
arpabet = np.random.choice(arpabet)
elif self.handle_arpabet_ambiguous == 'ignore':
return word
else:
arpabet = arpabet[0]
arpabet = "{" + arpabet + arpabet_suffix + "}"
return arpabet
def encode_text(self, text, return_all=False):
if self.expand_currency:
text = re.sub(_currency_re, _expand_currency, text)
text_clean = [self.clean_text(split) if split[0] != '{' else split
for split in _arpa_re.findall(text)]
text_clean = ' '.join(text_clean)
text_clean = cleaners.collapse_whitespace(text_clean)
text = text_clean
text_arpabet = ''
if self.p_arpabet > 0:
if self.handle_arpabet == 'sentence':
if np.random.uniform() < self.p_arpabet:
words = _words_re.findall(text)
text_arpabet = [
self.get_arpabet(word[0])
if (word[0] != '') else word[1]
for word in words]
text_arpabet = ''.join(text_arpabet)
text = text_arpabet
elif self.handle_arpabet == 'word':
words = _words_re.findall(text)
text_arpabet = [
word[1] if word[0] == '' else (
self.get_arpabet(word[0])
if np.random.uniform() < self.p_arpabet
else word[0])
for word in words]
text_arpabet = ''.join(text_arpabet)
text = text_arpabet
elif self.handle_arpabet != '':
raise Exception("{} handle_arpabet is not supported".format(
self.handle_arpabet))
text_encoded = self.text_to_sequence(text)
if return_all:
return text_encoded, text_clean, text_arpabet
return text_encoded
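# Illustrative usage sketch (the symbol set and cleaner names are assumptions that must exist
# in symbols.py / cleaners.py):
#   tp = TextProcessing('english_basic', ['english_cleaners_v2'], p_arpabet=0.5)
#   ids, clean, arpa = tp.encode_text("The quick brown fox.", return_all=True)
#   text = tp.sequence_to_text(ids)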
|
TensorFlow/Translation/GNMT/utils | utils | misc_utils | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generally useful utility functions."""
from __future__ import print_function
import codecs
import collections
import json
import math
import os
import sys
import time
from distutils import version
import tensorflow as tf
def check_tensorflow_version():
# LINT.IfChange
min_tf_version = "1.3.0"
# LINT
if (version.LooseVersion(tf.__version__) <
version.LooseVersion(min_tf_version)):
raise EnvironmentError("Tensorflow version must be >= %s" % min_tf_version)
def weighted_avg(inputs, weights, force_fp32=False):
dtype = tf.float32 if force_fp32 else inputs[0].dtype
inputs = [tf.cast(x, dtype) for x in inputs]
weights = [tf.cast(x, dtype) for x in weights]
norm = tf.add_n([x * y for x, y in zip(inputs, weights)])
denorm = tf.add_n(weights)
return norm / denorm
def safe_exp(value):
"""Exponentiation with catching of overflow error."""
try:
ans = math.exp(value)
except OverflowError:
ans = float("inf")
return ans
def print_time(s, start_time):
"""Take a start time, print elapsed duration, and return a new time."""
print("%s, time %ds, %s." % (s, (time.time() - start_time), time.ctime()))
sys.stdout.flush()
return time.time()
def print_out(s, f=None, new_line=True):
"""Similar to print but with support to flush and output to a file."""
if isinstance(s, bytes):
s = s.decode("utf-8")
if f:
f.write(s)
if new_line:
f.write(u"\n")
# stdout
out_s = s.encode("utf-8")
if not isinstance(out_s, str):
out_s = out_s.decode("utf-8")
print(out_s, end="", file=sys.stdout)
if new_line:
sys.stdout.write("\n")
sys.stdout.flush()
def print_hparams(hparams, skip_patterns=None, header=None):
"""Print hparams, can skip keys based on pattern."""
if header: print_out("%s" % header)
values = hparams.values()
for key in sorted(values.keys()):
if not skip_patterns or all(
[skip_pattern not in key for skip_pattern in skip_patterns]):
print_out(" %s=%s" % (key, str(values[key])))
def serialize_hparams(hparams):
"""Print hparams, can skip keys based on pattern."""
values = hparams.values()
res = ""
for key in sorted(values.keys()):
res += "%s=%s\n" % (key, str(values[key]))
return res
def load_hparams(model_dir):
"""Load hparams from an existing model directory."""
hparams_file = os.path.join(model_dir, "hparams")
if tf.gfile.Exists(hparams_file):
print_out("# Loading hparams from %s" % hparams_file)
with codecs.getreader("utf-8")(tf.gfile.GFile(hparams_file, "rb")) as f:
try:
hparams_values = json.load(f)
hparams = tf.contrib.training.HParams(**hparams_values)
except ValueError:
print_out(" can't load hparams file")
return None
return hparams
else:
return None
def maybe_parse_standard_hparams(hparams, hparams_path):
"""Override hparams values with existing standard hparams config."""
if hparams_path and tf.gfile.Exists(hparams_path):
print_out("# Loading standard hparams from %s" % hparams_path)
with codecs.getreader("utf-8")(tf.gfile.GFile(hparams_path, "rb")) as f:
hparams.parse_json(f.read())
return hparams
def save_hparams(output_dir, hparams):
"""Save hparams."""
hparams_file = os.path.join(output_dir, "hparams")
print_out(" saving hparams to %s" % hparams_file)
with codecs.getwriter("utf-8")(tf.gfile.GFile(hparams_file, "wb")) as f:
f.write(hparams.to_json(indent=4, sort_keys=True))
def debug_tensor(s, msg=None, summarize=10):
"""Print the shape and value of a tensor at test time. Return a new tensor."""
if not msg:
msg = s.name
return tf.Print(s, [tf.shape(s), s], msg + " ", summarize=summarize)
def add_summary(summary_writer, global_step, tag, value):
"""Add a new summary to the current summary_writer."""
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
summary_writer.add_summary(summary, global_step)
def format_text(words):
"""Convert a sequence words into sentence."""
if (not hasattr(words, "__len__") and # for numpy array
not isinstance(words, collections.Iterable)):
words = [words]
return b" ".join(words)
def format_bpe_text(symbols, delimiter=b"@@"):
"""Convert a sequence of bpe words into sentence."""
words = []
word = b""
if isinstance(symbols, str):
symbols = symbols.encode()
delimiter_len = len(delimiter)
for symbol in symbols:
if len(symbol) >= delimiter_len and symbol[-delimiter_len:] == delimiter:
word += symbol[:-delimiter_len]
else: # end of a word
word += symbol
words.append(word)
word = b""
return b" ".join(words)
def format_spm_text(symbols):
"""Decode a text in SPM (https://github.com/google/sentencepiece) format."""
return u"".join(format_text(symbols).decode("utf-8").split()).replace(
u"\u2581", u" ").strip().encode("utf-8")
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2LSTMCellPlugin | taco2LSTMCellPlugin | taco2LSTMCellKernel | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "taco2LSTMCellKernel.h"
#include "taco2Utils.h"
#include "cuda_fp16.h"
#include <cassert>
#include <cmath>
#include <iostream>
#include <stdexcept>
#include <string>
using namespace tts;
namespace nvinfer1
{
namespace plugin
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int BLOCK_COL_SIZE = 128;
// must be at least 4 to allow computation of i,f,g,o by a single block
constexpr const int BLOCK_ROWS_PER_THREAD = 4;
} // namespace
/******************************************************************************
* CUDA KERNELS ***************************************************************
*****************************************************************************/
__device__ inline float sigmoid(const float x)
{
return 1.0f / (1.0f + exp(-x));
}
__device__ inline float dot2(const float2 a, const __half2 b)
{
float2 bf = __half22float2(b);
return a.x * bf.x + a.y * bf.y;
}
template <typename T, int NUM_THREADS>
__device__ inline T warpSum(T const initVal)
{
constexpr const uint32_t mask = NUM_THREADS < 32 ? (1u << NUM_THREADS) - 1 : 0xffffffff;
T val = initVal;
#pragma unroll
for (int d = NUM_THREADS / 2; d > 0; d /= 2)
{
val += __shfl_down_sync(mask, val, d, NUM_THREADS);
}
return val;
}
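// Illustrative note: for NUM_THREADS = 32 the loop above performs shuffle-down
// reductions with offsets 16, 8, 4, 2, 1, so after the final step lane 0 of the
// group holds the sum of all 32 per-thread partial values.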
// template <typename T, int BLOCK_SIZE>
//__device__ T cooperativeSum(T const initVal, T* const buffer)
//{
// // first all warps reduce to single value
// assert(BLOCK_SIZE % WARP_SIZE == 0);
// assert(BLOCK_SIZE <= WARP_SIZE * WARP_SIZE);
//
// int val = warpSum<T, WARP_SIZE>(initVal);
// if (threadIdx.x % WARP_SIZE == 0) {
// buffer[threadIdx.x / WARP_SIZE] = val;
// }
// __syncthreads();
//
// if (threadIdx.x < (BLOCK_SIZE / WARP_SIZE)) {
// val = warpSum<T, BLOCK_SIZE / WARP_SIZE>(buffer[threadIdx.x]);
// }
//
// return val;
//}
__device__ void sumBlock(float* const shared)
{
constexpr const int chunkSize = BLOCK_COL_SIZE / BLOCK_ROWS_PER_THREAD;
const int tid = threadIdx.x % chunkSize;
const int chunkId = threadIdx.x / chunkSize;
assert(chunkSize <= 32);
float val = 0.0f;
#pragma unroll
for (int i = tid; i < BLOCK_COL_SIZE; i += chunkSize)
{
val += shared[chunkId * BLOCK_COL_SIZE + i];
}
val = warpSum<float, chunkSize>(val);
if (tid == 0)
{
shared[chunkId * BLOCK_COL_SIZE] = val;
}
__syncthreads();
}
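// Illustrative note on sumBlock(): `shared` holds BLOCK_ROWS_PER_THREAD rows of
// BLOCK_COL_SIZE partial sums. Each chunk of BLOCK_COL_SIZE / BLOCK_ROWS_PER_THREAD
// (= 32) threads strides over one row, reduces it with warpSum, and leaves the
// row total in shared[row * BLOCK_COL_SIZE].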
template <int INPUT_LENGTH_A, int INPUT_LENGTH_B, int NUM_DIMENSIONS>
__global__ void lstmCellRowHalfKernel(const __half2* const weights, const float* const bias, const float2* const inputA,
const float2* const inputB, const float2* const hiddenIn, const float* const cellIn, float* const hiddenOut,
float* const cellOut)
{
__shared__ float shared[BLOCK_COL_SIZE * BLOCK_ROWS_PER_THREAD];
const int rowOffset = blockIdx.x * BLOCK_ROWS_PER_THREAD;
{
constexpr const int numCols = INPUT_LENGTH_A + INPUT_LENGTH_B + NUM_DIMENSIONS;
float values[BLOCK_ROWS_PER_THREAD];
for (int row = 0; row < BLOCK_ROWS_PER_THREAD; ++row)
{
values[row] = 0.0f;
}
// input A
for (int col = threadIdx.x; col < INPUT_LENGTH_A / 2; col += BLOCK_COL_SIZE)
{
const float2 v = inputA[col];
for (int row = 0; row < BLOCK_ROWS_PER_THREAD; ++row)
{
values[row] += dot2(v, weights[(rowOffset + row) * (numCols / 2) + col]);
}
}
// input B
for (int col = threadIdx.x; col < INPUT_LENGTH_B / 2; col += BLOCK_COL_SIZE)
{
const float2 v = inputB[col];
for (int row = 0; row < BLOCK_ROWS_PER_THREAD; ++row)
{
values[row] += dot2(v, weights[(rowOffset + row) * (numCols / 2) + (INPUT_LENGTH_A / 2) + col]);
}
}
// hidden input
for (int col = threadIdx.x; col < NUM_DIMENSIONS / 2; col += BLOCK_COL_SIZE)
{
const float2 v = hiddenIn[col];
for (int row = 0; row < BLOCK_ROWS_PER_THREAD; ++row)
{
values[row] += dot2(
v, weights[(rowOffset + row) * (numCols / 2) + ((INPUT_LENGTH_A + INPUT_LENGTH_B) / 2) + col]);
}
}
// place outputs into shared memory for reduction
for (int row = 0; row < BLOCK_ROWS_PER_THREAD; ++row)
{
shared[row * BLOCK_COL_SIZE + threadIdx.x] = values[row];
}
}
__syncthreads();
sumBlock(shared);
{
const int globalRow = rowOffset + threadIdx.x;
// add bias and functify (first four threads only)
if (threadIdx.x < BLOCK_ROWS_PER_THREAD)
{
float sum = shared[threadIdx.x * BLOCK_COL_SIZE] + bias[globalRow];
if (threadIdx.x % 4 == 2)
{
// g gets tanh
sum = tanh(sum);
}
else
{
// everything else gets sigmoid
sum = sigmoid(sum);
}
shared[threadIdx.x * BLOCK_COL_SIZE] = sum;
__syncwarp(0x0000000f);
if ((threadIdx.x % 4) == 0)
{
const int stateRow = globalRow / 4;
const float i = shared[(threadIdx.x + 0) * BLOCK_COL_SIZE];
const float f = shared[(threadIdx.x + 1) * BLOCK_COL_SIZE];
const float g = shared[(threadIdx.x + 2) * BLOCK_COL_SIZE];
const float o = shared[(threadIdx.x + 3) * BLOCK_COL_SIZE];
const float c = cellIn[stateRow];
const float cPrime = f * c + i * g;
const float hPrime = o * tanh(cPrime);
cellOut[stateRow] = cPrime;
hiddenOut[stateRow] = hPrime;
}
}
}
}
template <int INPUT_LENGTH_A, int INPUT_LENGTH_B, int NUM_DIMENSIONS>
__global__ void lstmCellRowFloatKernel(const float* const weights, const float* const bias, const float* const inputA,
const float* const inputB, const float* const hiddenIn, const float* const cellIn, float* const hiddenOut,
float* const cellOut)
{
__shared__ float shared[BLOCK_COL_SIZE * BLOCK_ROWS_PER_THREAD];
const int rowOffset = blockIdx.x * BLOCK_ROWS_PER_THREAD;
{
constexpr const int numCols = NUM_DIMENSIONS + INPUT_LENGTH_A + INPUT_LENGTH_B;
float values[BLOCK_ROWS_PER_THREAD];
for (int row = 0; row < BLOCK_ROWS_PER_THREAD; ++row)
{
values[row] = 0.0f;
}
// input A
for (int col = threadIdx.x; col < INPUT_LENGTH_A; col += BLOCK_COL_SIZE)
{
const float v = inputA[col];
for (int row = 0; row < BLOCK_ROWS_PER_THREAD; ++row)
{
values[row] += v * weights[(rowOffset + row) * numCols + col];
}
}
// input B
for (int col = threadIdx.x; col < INPUT_LENGTH_B; col += BLOCK_COL_SIZE)
{
const float v = inputB[col];
for (int row = 0; row < BLOCK_ROWS_PER_THREAD; ++row)
{
values[row] += v * weights[(rowOffset + row) * numCols + INPUT_LENGTH_A + col];
}
}
// hidden input
for (int col = threadIdx.x; col < NUM_DIMENSIONS; col += BLOCK_COL_SIZE)
{
const float v = hiddenIn[col];
for (int row = 0; row < BLOCK_ROWS_PER_THREAD; ++row)
{
values[row] += v * weights[(rowOffset + row) * numCols + (INPUT_LENGTH_A + INPUT_LENGTH_B) + col];
}
}
// place outputs into shared memory for reduction
for (int row = 0; row < BLOCK_ROWS_PER_THREAD; ++row)
{
shared[row * BLOCK_COL_SIZE + threadIdx.x] = values[row];
}
}
__syncthreads();
sumBlock(shared);
{
const int globalRow = rowOffset + threadIdx.x;
// add bias and functify (first four threads only)
if (threadIdx.x < BLOCK_ROWS_PER_THREAD)
{
float sum = shared[threadIdx.x * BLOCK_COL_SIZE] + bias[globalRow];
if (threadIdx.x % 4 == 2)
{
// g gets tanh
sum = tanh(sum);
}
else
{
// everything else gets sigmoid
sum = sigmoid(sum);
}
shared[threadIdx.x * BLOCK_COL_SIZE] = sum;
}
__syncwarp(0x0000000f);
if (threadIdx.x < BLOCK_ROWS_PER_THREAD && (threadIdx.x % 4) == 0)
{
const int stateRow = globalRow / 4;
const float i = shared[(threadIdx.x + 0) * BLOCK_COL_SIZE];
const float f = shared[(threadIdx.x + 1) * BLOCK_COL_SIZE];
const float g = shared[(threadIdx.x + 2) * BLOCK_COL_SIZE];
const float o = shared[(threadIdx.x + 3) * BLOCK_COL_SIZE];
const float c = cellIn[stateRow];
const float cPrime = f * c + i * g;
const float hPrime = o * tanh(cPrime);
cellOut[stateRow] = cPrime;
hiddenOut[stateRow] = hPrime;
}
}
}
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
size_t stride(const size_t i, const size_t n, const size_t s)
{
return ((i * (n / s)) % n) + (i / s);
}
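// Illustrative example: with numRows = 8 and gate-grouped source rows
// [i_0, i_1, f_0, f_1, g_0, g_1, o_0, o_1], destination row k reads source row
// stride(k, 8, 4), producing the interleaved order
// [i_0, f_0, g_0, o_0, i_1, f_1, g_1, o_1] expected by the LSTM cell kernels.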
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
Taco2LSTMCellKernel::Taco2LSTMCellKernel(
const float* const inputWeightsHost,
const float* const hiddenWeightsHost,
const float* const inputBiasHost,
const float* const hiddenBiasHost,
const int inputLength,
const int numDimension,
const bool useFP16) :
mInputLength(inputLength),
mNumDimension(numDimension),
mFp16(useFP16),
mWeightsDevice(),
mBiasDevice()
{
const size_t numRows = 4 * mNumDimension;
{ // combine weights into single matrix on device [W_i W_h], in column
// major order, and in i_0, f_0, g_0, o_0, ... i_n, f_n, g_n, o_n order.
std::vector<float> weightCat((mNumDimension + mInputLength) * numRows);
// row wise strided
for (size_t i = 0; i < numRows; ++i)
{
for (size_t j = 0; j < static_cast<size_t>(mInputLength); ++j)
{
weightCat[i * (mInputLength + mNumDimension) + j]
= inputWeightsHost[stride(i, numRows, 4) * mInputLength + j];
}
}
for (size_t i = 0; i < numRows; ++i)
{
for (size_t j = 0; j < static_cast<size_t>(mNumDimension); ++j)
{
weightCat[i * (mInputLength + mNumDimension) + mInputLength + j]
= hiddenWeightsHost[stride(i, numRows, 4) * mNumDimension + j];
}
}
if (mFp16)
{
// copy to device as floats
CudaMemory<float> weightsFloatDevice(weightCat);
// convert to halfs
mWeightsDevice = CudaMemory<float>(
taco2::Taco2Utils::roundUpBlocks(weightsFloatDevice.size(), 2));
taco2::Taco2Utils::floatsToHalves(
weightsFloatDevice.data(),
mWeightsDevice.data(),
weightsFloatDevice.size());
}
else
{
mWeightsDevice = CudaMemory<float>(weightCat);
}
}
{ // add biases together before moving to device [b_i + b_h],
// and in i_0, f_0, g_0, o_0, ... i_n, f_n, g_n, o_n order.
std::vector<float> biasSum(numRows);
for (size_t i = 0; i < biasSum.size(); ++i)
{
const size_t j = stride(i, numRows, 4);
assert(j < numRows);
biasSum[i] = inputBiasHost[j] + hiddenBiasHost[j];
}
mBiasDevice = CudaMemory<float>(biasSum.size());
taco2::Taco2Utils::copyHostToDevice(static_cast<float*>(mBiasDevice.data()), biasSum.data(), biasSum.size());
}
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void Taco2LSTMCellKernel::execute(const float* const inputA, const float* const inputB, const float* const hiddenIn,
const float* const cellIn, float* const hiddenOut, float* const cellOut, const int inputLengthA,
const int inputLengthB, cudaStream_t stream)
{
assert(inputLengthA + inputLengthB == mInputLength);
const int numBlocks = taco2::Taco2Utils::roundUpBlocks(mNumDimension * 4, BLOCK_ROWS_PER_THREAD);
const dim3 grid(numBlocks);
const dim3 block(BLOCK_COL_SIZE);
assert(mNumDimension == 1024);
assert(inputLengthB == 512);
if (mFp16)
{
if (inputLengthA == 256)
{
lstmCellRowHalfKernel<256, 512, 1024><<<grid, block, 0, stream>>>(
reinterpret_cast<const __half2*>(mWeightsDevice.data()),
mBiasDevice.data(),
reinterpret_cast<const float2*>(inputA),
reinterpret_cast<const float2*>(inputB),
reinterpret_cast<const float2*>(hiddenIn),
cellIn,
hiddenOut,
cellOut);
}
else if (inputLengthA == 1024)
{
lstmCellRowHalfKernel<1024, 512, 1024><<<grid, block, 0, stream>>>(
reinterpret_cast<const __half2*>(mWeightsDevice.data()),
mBiasDevice.data(),
reinterpret_cast<const float2*>(inputA),
reinterpret_cast<const float2*>(inputB),
reinterpret_cast<const float2*>(hiddenIn),
cellIn,
hiddenOut,
cellOut);
}
else
{
throw std::runtime_error("Unsupported Input A length of " + std::to_string(inputLengthA));
}
}
else
{
if (inputLengthA == 256)
{
lstmCellRowFloatKernel<256, 512, 1024><<<grid, block, 0, stream>>>(
mWeightsDevice.data(),
mBiasDevice.data(),
inputA,
inputB,
hiddenIn,
cellIn,
hiddenOut,
cellOut);
}
else if (inputLengthA == 1024)
{
lstmCellRowFloatKernel<1024, 512, 1024><<<grid, block, 0, stream>>>(
mWeightsDevice.data(),
mBiasDevice.data(),
inputA,
inputB,
hiddenIn,
cellIn,
hiddenOut,
cellOut);
}
else
{
throw std::runtime_error("Unsupported Input A length of " + std::to_string(inputLengthA));
}
}
}
} // namespace plugin
} // namespace nvinfer1
|
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/ops | ops | spatial_transform_ops | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import tensorflow as tf
def nearest_upsampling(data, scale):
"""Nearest neighbor upsampling implementation.
Args:
data: A tensor with a shape of [batch, height_in, width_in, channels].
scale: An integer multiple to scale resolution of input data.
Returns:
data_up: A tensor with a shape of
[batch, height_in*scale, width_in*scale, channels]. Same dtype as input
data.
"""
with tf.name_scope('nearest_upsampling'):
bs, h, w, c = tf.unstack(tf.shape(input=data))
# Use reshape to quickly upsample the input.
# The nearest pixel is selected implicitly via broadcasting.
# data = tf.reshape(data, [bs, h, 1, w, 1, c]) * tf.ones([1, 1, scale, 1, scale, 1], dtype=data.dtype)
# Instead of broadcasting with a 6-d tensor, we're using stacking here
# for TFLite compatibility.
output = tf.stack([data] * scale, axis=3)
output = tf.stack([output] * scale, axis=2)
return tf.reshape(output, [bs, h * scale, w * scale, c])
def selective_crop_and_resize(features,
boxes,
box_levels,
boundaries,
output_size=7,
training=True):
"""Crop and resize boxes on a set of feature maps.
Given multiple features maps indexed by different levels, and a set of boxes
where each box is mapped to a certain level, it selectively crops and resizes
boxes from the corresponding feature maps to generate the box features.
We follow the ROIAlign technique (see https://arxiv.org/pdf/1703.06870.pdf,
figure 3 for reference). Specifically, for each feature map, we select an
(output_size, output_size) set of pixels corresponding to the box location,
and then use bilinear interpolation to select the feature value for each
pixel.
For performance, we perform the gather and interpolation on all layers as a
single operation. In this op, the multi-level features are first stacked and
gathered into [2*output_size, 2*output_size] feature points. Then bilinear
interpolation is performed on the gathered feature points to generate
[output_size, output_size] RoIAlign feature map.
Here is the step-by-step algorithm:
1. The multi-level features are gathered into a
[batch_size, num_boxes, output_size*2, output_size*2, num_filters]
Tensor. The Tensor contains four neighboring feature points for each
vertex in the output grid.
2. Compute the interpolation kernel of shape
[batch_size, num_boxes, output_size*2, output_size*2]. The last 2 axis
can be seen as stacking 2x2 interpolation kernels for all vertices in the
output grid.
3. Element-wise multiply the gathered features and interpolation kernel.
Then apply 2x2 average pooling to reduce spatial dimension to
output_size.
Args:
features: a 5-D tensor of shape
[batch_size, num_levels, max_height, max_width, num_filters] where
cropping and resizing are based.
boxes: a 3-D tensor of shape [batch_size, num_boxes, 4] encoding the
information of each box w.r.t. the corresponding feature map.
boxes[:, :, 0:2] are the grid position in (y, x) (float) of the top-left
corner of each box. boxes[:, :, 2:4] are the box sizes in (h, w) (float)
in terms of the number of pixels of the corresponding feature map size.
box_levels: a 3-D tensor of shape [batch_size, num_boxes, 1] representing
the 0-based corresponding feature level index of each box.
boundaries: a 3-D tensor of shape [batch_size, num_boxes, 2] representing
the boundary (in (y, x)) of the corresponding feature map for each box.
Any resampled grid points that go beyond the boundary will be clipped.
output_size: a scalar indicating the output crop size.
training: whether to build the model for training (or inference).
Returns:
features_per_box: a 5-D tensor of shape
[batch_size, num_boxes, output_size, output_size, num_filters]
representing the cropped features.
"""
(batch_size, num_levels, max_feature_height, max_feature_width,
num_filters) = features.get_shape().as_list()
_, num_boxes, _ = boxes.get_shape().as_list()
# Compute the grid position w.r.t. the corresponding feature map.
box_grid_x = []
box_grid_y = []
for i in range(output_size):
box_grid_x.append(boxes[:, :, 1:2] +
(i + 0.5) * boxes[:, :, 3:4] / output_size)
box_grid_y.append(boxes[:, :, 0:1] +
(i + 0.5) * boxes[:, :, 2:3] / output_size)
box_grid_x = tf.concat(box_grid_x, axis=-1)
box_grid_y = tf.concat(box_grid_y, axis=-1)
# Compute indices for gather operation.
box_grid_y0 = tf.floor(box_grid_y)
box_grid_x0 = tf.floor(box_grid_x)
box_grid_x0 = tf.maximum(0., box_grid_x0)
box_grid_y0 = tf.maximum(0., box_grid_y0)
box_gridx0x1 = tf.stack([
tf.minimum(box_grid_x0, boundaries[:, :, 1:2]),
tf.minimum(box_grid_x0 + 1, boundaries[:, :, 1:2])
],
axis=3)
box_gridy0y1 = tf.stack([
tf.minimum(box_grid_y0, boundaries[:, :, 0:1]),
tf.minimum(box_grid_y0 + 1, boundaries[:, :, 0:1])
],
axis=3)
x_indices = tf.reshape(box_gridx0x1, [batch_size, num_boxes, output_size * 2])
y_indices = tf.reshape(box_gridy0y1, [batch_size, num_boxes, output_size * 2])
# If using GPU for inference, delay the cast until when Gather ops show up
# since GPU inference supports float point better.
# TODO(laigd): revisit this when newer versions of GPU libraries is released.
indices_dtype = tf.float32 if not training else tf.int32
if training:
x_indices = tf.cast(x_indices, tf.int32)
y_indices = tf.cast(y_indices, tf.int32)
height_dim_offset = max_feature_width
level_dim_offset = max_feature_height * height_dim_offset
batch_dim_offset = num_levels * level_dim_offset
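# The offsets above address the feature maps as one flat
# [batch_size * num_levels * height * width, num_filters] array: the gather
# index of a sample point is batch * batch_dim_offset + level * level_dim_offset
# + y * height_dim_offset + x.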
batch_dim_indices = (
tf.reshape(tf.range(batch_size, dtype=indices_dtype) * batch_dim_offset, [batch_size, 1, 1, 1]) *
tf.ones([1, num_boxes, output_size * 2, output_size * 2], dtype=indices_dtype)
)
box_level_indices = (
tf.reshape(box_levels * level_dim_offset, [batch_size, num_boxes, 1, 1]) *
tf.ones([1, 1, output_size * 2, output_size * 2], dtype=indices_dtype)
)
height_indices = (
tf.reshape(y_indices * height_dim_offset, [batch_size, num_boxes, output_size * 2, 1]) *
tf.ones([1, 1, 1, output_size * 2], dtype=indices_dtype)
)
width_indices = (
tf.reshape(x_indices, [batch_size, num_boxes, 1, output_size * 2]) *
tf.ones([1, 1, output_size * 2, 1], dtype=indices_dtype)
)
batch_dim_indices = tf.cast(batch_dim_indices, tf.float32)
box_level_indices = tf.cast(box_level_indices, tf.float32)
height_indices = tf.cast(height_indices, tf.float32)
width_indices = tf.cast(width_indices, tf.float32)
indices = tf.add_n([
batch_dim_indices,
box_level_indices,
height_indices,
width_indices,
])
indices = tf.cast(indices, tf.int32)
if batch_size == 1:
# Special handling for single batch input to make it friendly for GPU
# inference.
indices = tf.reshape(indices, [1, -1])
if not training:
indices = tf.cast(indices, dtype=tf.int32)
features = tf.reshape(features, [1, -1, num_filters])
# The cast should happen last since the GPU has better support for
# floating-point operations.
features_per_box = tf.gather(features, indices, axis=1)
else:
indices = tf.reshape(indices, [-1])
if not training:
indices = tf.cast(indices, dtype=tf.int32)
features = tf.reshape(features, [-1, num_filters])
features_per_box = tf.gather(features, indices)
features_per_box = tf.reshape(
features_per_box,
[batch_size, num_boxes, output_size * 2, output_size * 2, num_filters]
)
# The RoIAlign feature f can be computed by bilinear interpolation of four
# neighboring feature points f0, f1, f2, and f3.
# f(y, x) = [hy, ly] * [[f00, f01], [f10, f11]] * [hx, lx]^T
# f(y, x) = (hy*hx)f00 + (hy*lx)f01 + (ly*hx)f10 + (lx*ly)f11
# f(y, x) = w00*f00 + w01*f01 + w10*f10 + w11*f11
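# For instance, a sample point exactly at the centre of its four neighbours
# (ly = lx = 0.5) weights each of f00, f01, f10 and f11 by 0.25.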
ly = box_grid_y - box_grid_y0
lx = box_grid_x - box_grid_x0
hy = 1.0 - ly
hx = 1.0 - lx
kernel_x = tf.reshape(tf.stack([hx, lx], axis=3), [batch_size, num_boxes, 1, output_size * 2])
kernel_y = tf.reshape(tf.stack([hy, ly], axis=3), [batch_size, num_boxes, output_size * 2, 1])
# Use implicit broadcast to generate the interpolation kernel. The
# multiplier `4` is for avg pooling.
interpolation_kernel = kernel_y * kernel_x * 4
# Interpolate the gathered features with computed interpolation kernels.
features_per_box *= tf.cast(tf.expand_dims(interpolation_kernel, axis=4), dtype=features_per_box.dtype)
features_per_box = tf.reshape(
features_per_box,
[batch_size * num_boxes, output_size * 2, output_size * 2, num_filters]
)
features_per_box = tf.nn.avg_pool2d(input=features_per_box, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='VALID')
features_per_box = tf.reshape(features_per_box,
[batch_size, num_boxes, output_size, output_size, num_filters])
return features_per_box
def multilevel_crop_and_resize(features,
boxes,
output_size=7,
training=True):
"""Crop and resize on multilevel feature pyramid.
Generate the (output_size, output_size) set of pixels for each input box
by first assigning the box to the correct feature level, and then cropping
and resizing it using the corresponding feature map of that level.
Args:
features: A dictionary with key as pyramid level and value as features. The
features are in shape of [batch_size, height_l, width_l, num_filters].
boxes: A 3-D Tensor of shape [batch_size, num_boxes, 4]. Each row represents
a box with [y1, x1, y2, x2] in un-normalized coordinates.
output_size: A scalar to indicate the output crop size.
training: whether to build the model for training (or inference).
Returns:
A 5-D tensor representing feature crop of shape
[batch_size, num_boxes, output_size, output_size, num_filters].
"""
with tf.name_scope('multilevel_crop_and_resize'):
levels = features.keys()
min_level = min(levels)
max_level = max(levels)
_, max_feature_height, max_feature_width, _ = (
features[min_level].get_shape().as_list())
# Stack feature pyramid into a features_all of shape
# [batch_size, levels, height, width, num_filters].
features_all = []
for level in range(min_level, max_level + 1):
features_all.append(
tf.image.pad_to_bounding_box(features[level], 0, 0, max_feature_height, max_feature_width))
features_all = tf.stack(features_all, axis=1)
# Assign boxes to the right level.
box_width = tf.squeeze(boxes[:, :, 3:4] - boxes[:, :, 1:2], axis=-1)
box_height = tf.squeeze(boxes[:, :, 2:3] - boxes[:, :, 0:1], axis=-1)
areas_sqrt = tf.sqrt(box_height * box_width)
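# FPN level assignment (cf. Lin et al., 2017): level = floor(4 + log2(sqrt(area) / 224)),
# so a box of roughly 224x224 pixels is mapped to level 4.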
levels = tf.math.floordiv(tf.math.log(tf.divide(areas_sqrt, 224.0)), tf.math.log(2.0)) + 4.0
if training:
levels = tf.cast(levels, dtype=tf.int32)
# Map levels between [min_level, max_level].
levels = tf.minimum(
float(max_level) if not training else max_level,
tf.maximum(levels, float(min_level) if not training else min_level)
)
# Project box location and sizes to corresponding feature levels.
scale_to_level = tf.cast(
tf.pow(tf.constant(2.0), levels if not training else tf.cast(levels, tf.float32)),
dtype=boxes.dtype
)
boxes /= tf.expand_dims(scale_to_level, axis=2)
box_width /= scale_to_level
box_height /= scale_to_level
boxes = tf.concat(
[boxes[:, :, 0:2],
tf.expand_dims(box_height, -1),
tf.expand_dims(box_width, -1)],
axis=-1
)
# Map levels to [0, max_level-min_level].
levels -= min_level
level_strides = tf.pow([[2.0]], levels if not training else tf.cast(levels, tf.float32))
boundary = tf.cast(
tf.concat(
[
tf.expand_dims([[tf.cast(max_feature_height, tf.float32)]] / level_strides - 1, axis=-1),
tf.expand_dims([[tf.cast(max_feature_width, tf.float32)]] / level_strides - 1, axis=-1),
],
axis=-1
),
boxes.dtype
)
return selective_crop_and_resize(
features=features_all,
boxes=boxes,
box_levels=levels,
boundaries=boundary,
output_size=output_size,
training=training
)
|
PyTorch/Segmentation/nnUNet/triton | triton | dataloader | import numpy as np
from data_loading.dali_loader import fetch_dali_loader
from sklearn.model_selection import KFold
from utils.utils import get_split, load_data
def get_dataloader_fn(*, data_dir: str, batch_size: int, precision: str):
kwargs = {
"dim": 3,
"gpus": 1,
"seed": 0,
"num_workers": 8,
"meta": None,
"oversampling": 0,
"benchmark": False,
"patch_size": [128, 128, 128],
}
imgs, lbls = load_data(data_dir, "*_x.npy"), load_data(data_dir, "*_y.npy")
kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
_, val_idx = list(kfold.split(imgs))[2]
imgs, lbls = get_split(imgs, val_idx), get_split(lbls, val_idx)
dataloader = fetch_dali_loader(imgs, lbls, batch_size, "bermuda", **kwargs)
def _dataloader_fn():
for i, batch in enumerate(dataloader):
fname = [f"{i}_{j}" for j in range(batch_size)]
img = batch["image"].numpy()
if "fp16" in precision:
img = img.astype(np.half)
img = {"INPUT__0": img}
lbl = {"OUTPUT__0": batch["label"].squeeze(1).numpy().astype(int)}
yield fname, img, lbl
return _dataloader_fn
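# Minimal usage sketch (illustrative only; the data directory, batch size and
# precision string below are placeholders, and `run_inference` is hypothetical):
#
# dataloader_fn = get_dataloader_fn(data_dir="/data", batch_size=2, precision="fp16")
# for ids, inputs, labels in dataloader_fn():
#     run_inference(inputs["INPUT__0"])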
|
PyTorch/SpeechRecognition/Jasper/common/text/unidecoder | unidecoder | homoglyphs | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
#
# Copyright (c) 2015 Rob Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt
#
homoglyphs = {
' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'],
'!': ['ǃ', 'ⵑ', '!'],
'$': ['$'],
'%': ['%'],
'&': ['ꝸ', '&'],
"'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'],
'"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'],
'(': ['❨', '❲', '〔', '﴾', '(', '['],
')': ['❩', '❳', '〕', '﴿', ')', ']'],
'*': ['٭', '⁎', '∗', '*', '𐌟'],
'+': ['᛭', '➕', '+', '𐊛'],
',': ['¸', '؍', '٫', '‚', 'ꓹ', ','],
'-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'],
'.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'],
'/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'],
'2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'],
'3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'],
'4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'],
'5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'],
'6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'],
'7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'],
'8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'],
'9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'],
':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'],
';': [';', ';'],
'<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'],
'=': ['᐀', '⹀', '゠', '꓿', '='],
'>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'],
'?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 'ꛫ', '?'],
'@': ['@'],
'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'],
'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'],
'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'],
'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'],
'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'],
'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'],
'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'],
'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'],
'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'],
'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'],
'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'],
'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'],
'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'],
'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'],
'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'],
'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'],
'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'],
'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'],
'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'],
'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'],
'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'],
'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'],
'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'],
'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'],
'\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'],
'^': ['˄', 'ˆ'],
'_': ['ߺ', '﹍', '﹎', '﹏', '_'],
'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', '𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'],
'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'],
'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'],
'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'],
'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'],
'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'],
'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'],
'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'],
'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'],
'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'],
'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'],
'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'],
'm': ['m'],
'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'],
'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'],
'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'],
'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'],
'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'],
's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'],
't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'],
'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'],
'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'],
'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'],
'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'],
'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'],
'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', '𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'],
'{': ['❴', '{', '𝄔'],
'}': ['❵', '}'],
'~': ['˜', '῀', '⁓', '∼'],
}
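# Minimal sketch (not part of the original module) of how such a table can be
# used to fold confusable characters back to their ASCII counterparts:
#
# replacements = {glyph: ascii_char
#                 for ascii_char, glyphs in homoglyphs.items()
#                 for glyph in glyphs}
# normalized = "".join(replacements.get(ch, ch) for ch in text)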
|
PyTorch/Translation/Transformer/fairseq/models | models | transformer | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from typing import Optional, Dict
from fairseq.modules import (
LearnedPositionalEmbedding, MultiheadAttention, SinusoidalPositionalEmbedding
)
from . import (
FairseqIncrementalDecoder, register_model,
register_model_architecture,
)
from apex.normalization.fused_layer_norm import FusedLayerNorm
torch.set_printoptions(threshold=500000000, linewidth=1024)
@torch.jit.script
def jit_dropout_add(x, residual, prob, is_training):
# type: (Tensor, Tensor, float, bool) -> Tensor
out = F.dropout(x, p=prob, training=is_training)
out = residual + out
return out
@torch.jit.script
def jit_relu_dropout(x, prob, is_training):
# type: (Tensor, float, bool) -> Tensor
out = F.threshold(x, 0., 0.)
out = F.dropout(out, p=prob, training=is_training)
return out
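# Note: F.threshold(x, 0., 0.) is equivalent to a ReLU here; expressing the
# ReLU + dropout and dropout + residual-add pairs as TorchScript functions
# allows the JIT to fuse the elementwise operations.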
@register_model('transformer')
class TransformerModel(nn.Module):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--relu-dropout', type=float, metavar='D',
help='dropout probability after ReLU in FFN')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
def __init__(self, encoder, decoder):
super().__init__()
self._is_generation_fast = False
self.encoder = encoder
self.decoder = decoder
@classmethod
def build_model(cls, args):
# make sure all arguments are present in older models
base_architecture(args)
if not hasattr(args, 'max_source_positions'):
args.max_source_positions = 1024
if not hasattr(args, 'max_target_positions'):
args.max_target_positions = 1024
if args.share_all_embeddings:
if args.src_vocab_size != args.tgt_vocab_size:
raise RuntimeError('--share-all-embeddings requires a joined dictionary')
if args.encoder_embed_dim != args.decoder_embed_dim:
raise RuntimeError(
'--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path):
raise RuntimeError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = Embedding(args.src_vocab_size, args.encoder_embed_dim, args.padding_idx)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = Embedding(args.src_vocab_size, args.encoder_embed_dim, args.padding_idx)
decoder_embed_tokens = Embedding(args.tgt_vocab_size, args.decoder_embed_dim, args.padding_idx)
encoder = TransformerEncoder(args, encoder_embed_tokens)
decoder = TransformerDecoder(args, decoder_embed_tokens)
return TransformerModel(encoder, decoder)
def make_generation_fast_(self, **kwargs):
"""Optimize model for faster generation."""
if self._is_generation_fast:
return # only apply once
self._is_generation_fast = True
# remove weight norm from all modules in the network
def apply_remove_weight_norm(module):
try:
nn.utils.remove_weight_norm(module)
except ValueError: # this module didn't have weight norm
return
self.apply(apply_remove_weight_norm)
def apply_make_generation_fast_(module):
if module != self and hasattr(module, 'make_generation_fast_'):
module.make_generation_fast_(**kwargs)
self.apply(apply_make_generation_fast_)
def train(mode=True):
if mode:
raise RuntimeError('cannot train after make_generation_fast')
# this model should no longer be used for training
self.eval()
self.train = train
def forward(self, src_tokens, src_lengths, prev_output_tokens):
encoder_out, padding_mask = self.encoder(src_tokens, src_lengths)
decoder_out = self.decoder(prev_output_tokens, encoder_out, padding_mask)
return decoder_out
class TransformerEncoder(nn.Module):
"""Transformer encoder."""
def __init__(self, args, embed_tokens, left_pad=True):
super().__init__()
self.dropout = args.dropout
self.fuse_dropout_add = args.fuse_dropout_add
self.fuse_relu_dropout = args.fuse_relu_dropout
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_source_positions, embed_dim, self.padding_idx,
left_pad=left_pad,
learned=args.encoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(args)
for i in range(args.encoder_layers)
])
self.normalize = args.encoder_normalize_before
if self.normalize:
self.layer_norm = FusedLayerNorm(embed_dim) if args.fuse_layer_norm else nn.LayerNorm(embed_dim)
def forward(self, src_tokens, src_lengths):
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(src_tokens)
if self.embed_positions is not None:
x += self.embed_positions(src_tokens)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
# The transposed tensor needs to be made contiguous because
# fused dropout is not capable of handling strided data
if self.fuse_dropout_add:
x = x.transpose(0, 1).contiguous()
else:
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
if not encoder_padding_mask.any():
_encoder_padding_mask = None
else:
_encoder_padding_mask = encoder_padding_mask
# encoder layers
for layer in self.layers:
x = layer(x, _encoder_padding_mask)
if self.normalize:
x = self.layer_norm(x)
return x, encoder_padding_mask # x.shape == T x B x C, encoder_padding_mask.shape == B x T
def reorder_encoder_out(self, encoder_out, encoder_padding_mask, new_order):
if encoder_out is not None:
encoder_out = encoder_out.index_select(1, new_order)
if encoder_padding_mask is not None:
encoder_padding_mask = encoder_padding_mask.index_select(0, new_order)
return encoder_out, encoder_padding_mask
class TransformerDecoder(FairseqIncrementalDecoder):
"""Transformer decoder."""
def __init__(self, args, embed_tokens, no_encoder_attn=False, left_pad=False):
super().__init__()
self.dropout = args.dropout
self.share_input_output_embed = args.share_decoder_input_output_embed
self.fuse_dropout_add = args.fuse_dropout_add
self.fuse_relu_dropout = args.fuse_relu_dropout
embed_dim = embed_tokens.embedding_dim
padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(embed_dim)
self.embed_positions = PositionalEmbedding(
args.max_target_positions, embed_dim, padding_idx,
left_pad=left_pad,
learned=args.decoder_learned_pos,
) if not args.no_token_positional_embeddings else None
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerDecoderLayer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
])
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(torch.Tensor(args.tgt_vocab_size, embed_dim))
nn.init.normal_(self.embed_out, mean=0, std=embed_dim ** -0.5)
else:
self.embed_out = self.embed_tokens.weight
self.normalize = args.decoder_normalize_before
if self.normalize:
self.layer_norm = FusedLayerNorm(embed_dim) if args.fuse_layer_norm else nn.LayerNorm(embed_dim)
def forward(self,
prev_output_tokens: Tensor,
encoder_out: Tensor,
encoder_padding_mask: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Tensor]]]=None):
# embed positions
positions = self.embed_positions(
prev_output_tokens,
incremental_state=incremental_state,
) if self.embed_positions is not None else None
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
# The transposed tensor needs to be made contiguous because
# fused dropout is not capable of handling strided data
if self.fuse_dropout_add:
x = x.transpose(0, 1).contiguous()
else:
x = x.transpose(0, 1)
attn = None
# decoder layers
for layer in self.layers:
x, attn = layer(
x,
encoder_out,
encoder_padding_mask if encoder_padding_mask.any() else None,
incremental_state,
)
if self.normalize:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
# project back to size of vocabulary
x = F.linear(x, self.embed_out)
return x, attn
class TransformerEncoderLayer(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: dropout -> add residual -> layernorm.
In the tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
dropout -> add residual.
We default to the approach in the paper, but the tensor2tensor approach can
be enabled by setting `normalize_before=True`.
"""
def __init__(self, args):
super().__init__()
self.embed_dim = args.encoder_embed_dim
self.self_attn = MultiheadAttention(
self.embed_dim, args.encoder_attention_heads,
dropout=args.attention_dropout,
)
self.dropout = args.dropout
self.relu_dropout = args.relu_dropout
self.fuse_dropout_add = args.fuse_dropout_add
self.fuse_relu_dropout = args.fuse_relu_dropout
self.normalize_before = args.encoder_normalize_before
self.fc1 = Linear(self.embed_dim, args.encoder_ffn_embed_dim)
self.fc2 = Linear(args.encoder_ffn_embed_dim, self.embed_dim)
self.maybe_ln1 = MaybeLayerNorm(self.embed_dim, self.normalize_before, fuse=args.fuse_layer_norm)
self.maybe_ln2 = MaybeLayerNorm(self.embed_dim, self.normalize_before, fuse=args.fuse_layer_norm)
def forward(self, x: Tensor, encoder_padding_mask: Optional[Tensor]):
residual = x
x = self.maybe_ln1(x, before=True)
x, _ = self.self_attn(query=x, key=x, value=x,
mask_future_timesteps=False,
key_padding_mask=encoder_padding_mask,
incremental_state=None,
need_weights=False,
static_kv=False)
if self.fuse_dropout_add and self.training:
x = jit_dropout_add(x, residual, self.dropout, self.training)
else:
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_ln1(x, after=True)
residual = x
x = self.maybe_ln2(x, before=True)
if self.fuse_relu_dropout:
x = jit_relu_dropout(self.fc1(x), self.relu_dropout, self.training)
else:
x = F.threshold(self.fc1(x), 0.0, 0.0)
x = F.dropout(x, p=self.relu_dropout, training=self.training)
x = self.fc2(x)
if self.fuse_dropout_add and self.training:
x = jit_dropout_add(x, residual, self.dropout, self.training)
else:
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.maybe_ln2(x, after=True)
return x
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block."""
def __init__(self, args, no_encoder_attn=False):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.self_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
dropout=args.attention_dropout,
)
self.dropout = args.dropout
self.relu_dropout = args.relu_dropout
self.normalize_before = args.decoder_normalize_before
self.fuse_dropout_add = args.fuse_dropout_add
self.fuse_relu_dropout = args.fuse_relu_dropout
self.self_attn_layer_norm = MaybeLayerNorm(
self.embed_dim, self.normalize_before, fuse=args.fuse_layer_norm)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = MultiheadAttention(
self.embed_dim, args.decoder_attention_heads,
dropout=args.attention_dropout,
)
self.encoder_attn_layer_norm = MaybeLayerNorm(
self.embed_dim, self.normalize_before, fuse=args.fuse_layer_norm)
self.fc1 = Linear(self.embed_dim, args.decoder_ffn_embed_dim)
self.fc2 = Linear(args.decoder_ffn_embed_dim, self.embed_dim)
self.final_layer_norm = MaybeLayerNorm(
self.embed_dim, self.normalize_before, fuse=args.fuse_layer_norm)
self.need_attn = True
def forward(self,
x: Tensor,
encoder_out: Tensor,
encoder_padding_mask: Optional[Tensor],
incremental_state: Optional[Dict[str, Dict[str, Tensor]]]):
residual = x
x = self.self_attn_layer_norm(x, before=True)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
mask_future_timesteps=True,
key_padding_mask=None,
incremental_state=incremental_state,
need_weights=False,
static_kv=False
)
if self.fuse_dropout_add and self.training:
x = jit_dropout_add(x, residual, self.dropout, self.training)
else:
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.self_attn_layer_norm(x, after=True)
attn = None
if self.encoder_attn is not None:
residual = x
x = self.encoder_attn_layer_norm(x, before=True)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
mask_future_timesteps=False,
need_weights=(not self.training and self.need_attn),
)
if self.fuse_dropout_add and self.training:
x = jit_dropout_add(x, residual, self.dropout, self.training)
else:
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.encoder_attn_layer_norm(x, after=True)
residual = x
x = self.final_layer_norm(x, before=True)
if self.fuse_relu_dropout:
x = jit_relu_dropout(self.fc1(x), self.relu_dropout, self.training)
else:
x = F.threshold(self.fc1(x), 0.0, 0.0)
x = F.dropout(x, p=self.relu_dropout, training=self.training)
x = self.fc2(x)
if self.fuse_dropout_add and self.training:
x = jit_dropout_add(x, residual, self.dropout, self.training)
else:
x = F.dropout(x, p=self.dropout, training=self.training)
x = residual + x
x = self.final_layer_norm(x, after=True)
return x, attn
def make_generation_fast_(self, need_attn=False, **kwargs):
self.need_attn = need_attn
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
class MaybeLayerNorm(nn.Module):
def __init__(self, embed_dim, normalize_before, fuse=True):
super().__init__()
self.embed_dim = embed_dim
self.normalize_before = normalize_before
self.ln = FusedLayerNorm(embed_dim) if fuse else nn.LayerNorm(embed_dim)
def forward(self, x: Tensor, before: bool = False, after: bool = False):
assert before ^ after
if after ^ self.normalize_before:
return self.ln(x)
else:
return x
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
nn.init.constant_(m.bias, 0.)
return m
def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx, left_pad, learned=False):
if learned:
m = LearnedPositionalEmbedding(num_embeddings + padding_idx + 1, embedding_dim, padding_idx, left_pad)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(
embedding_dim, padding_idx, left_pad, num_embeddings + padding_idx + 1)
return m
@register_model_architecture('transformer', 'transformer')
def base_architecture(args):
args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 2048)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.attention_dropout = getattr(args, 'attention_dropout', 0.)
args.relu_dropout = getattr(args, 'relu_dropout', 0.)
args.dropout = getattr(args, 'dropout', 0.1)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.share_all_embeddings = getattr(args, 'share_all_embeddings', False)
args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
@register_model_architecture('transformer', 'transformer_iwslt_de_en')
def transformer_iwslt_de_en(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 1024)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 4)
args.encoder_layers = getattr(args, 'encoder_layers', 6)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 1024)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 4)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
base_architecture(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de')
def transformer_wmt_en_de(args):
base_architecture(args)
# parameters used in the "Attention Is All You Need" paper (Vaswani et al., 2017)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_de_big')
def transformer_vaswani_wmt_en_de_big(args):
args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.dropout = getattr(args, 'dropout', 0.3)
base_architecture(args)
@register_model_architecture('transformer', 'transformer_vaswani_wmt_en_fr_big')
def transformer_vaswani_wmt_en_fr_big(args):
args.dropout = getattr(args, 'dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
@register_model_architecture('transformer', 'transformer_wmt_en_de_big')
def transformer_wmt_en_de_big(args):
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
# default parameters used in tensor2tensor implementation
@register_model_architecture('transformer', 'transformer_wmt_en_de_big_t2t')
def transformer_wmt_en_de_big_t2t(args):
args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', True)
args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.relu_dropout = getattr(args, 'relu_dropout', 0.1)
transformer_vaswani_wmt_en_de_big(args)
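# Illustrative note (not part of the original fairseq code): these architecture
# presets compose by delegation, so each function only overrides what differs and
# then defers to the next one for the remaining defaults. Assuming `args` is an
# empty argparse.Namespace:
#
#   import argparse
#   args = argparse.Namespace()
#   transformer_wmt_en_de_big_t2t(args)
#   # -> sets pre-norm and attention/relu dropout here, then
#   #    transformer_vaswani_wmt_en_de_big() sets the 1024/4096/16-head sizes, then
#   #    base_architecture() fills in every remaining default via getattr().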
|
TensorFlow/Segmentation/UNet_Industrial/scripts | scripts | UNet_1GPU_XLA | #!/usr/bin/env bash
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script launches UNet training in FP32 with XLA on 1 GPU with batch size 16 (16 per GPU)
# Usage: ./UNet_1GPU_XLA.sh <path to result repository> <path to dataset> <dagm classID (1-10)>
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export TF_CPP_MIN_LOG_LEVEL=3
python "${BASEDIR}/../main.py" \
--unet_variant='tinyUNet' \
--activation_fn='relu' \
--exec_mode='train_and_evaluate' \
--iter_unit='batch' \
--num_iter=2500 \
--batch_size=16 \
--warmup_step=10 \
--results_dir="${1}" \
--data_dir="${2}" \
--dataset_name='DAGM2007' \
--dataset_classID="${3}" \
--data_format='NCHW' \
--use_auto_loss_scaling \
--noamp \
--xla \
--learning_rate=1e-4 \
--learning_rate_decay_factor=0.8 \
--learning_rate_decay_steps=500 \
--rmsprop_decay=0.9 \
--rmsprop_momentum=0.8 \
--loss_fn_name='adaptive_loss' \
--weight_decay=1e-5 \
--weight_init_method='he_uniform' \
--augment_data \
--display_every=250 \
--debug_verbosity=0
|
PyTorch/SpeechSynthesis/FastPitch/fastpitch | fastpitch | model | # *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from common import filter_warnings
from common.layers import ConvReLUNorm
from common.utils import mask_from_lens
from fastpitch.alignment import b_mas, mas_width1
from fastpitch.attention import ConvAttention
from fastpitch.transformer import FFTransformer
def regulate_len(durations, enc_out, pace: float = 1.0,
mel_max_len: Optional[int] = None):
"""If target=None, then predicted durations are applied"""
dtype = enc_out.dtype
reps = durations.float() / pace
reps = (reps + 0.5).long()
dec_lens = reps.sum(dim=1)
max_len = dec_lens.max()
reps_cumsum = torch.cumsum(F.pad(reps, (1, 0, 0, 0), value=0.0),
dim=1)[:, None, :]
reps_cumsum = reps_cumsum.to(dtype)
range_ = torch.arange(max_len, device=enc_out.device)[None, :, None]
mult = ((reps_cumsum[:, :, :-1] <= range_) &
(reps_cumsum[:, :, 1:] > range_))
mult = mult.to(dtype)
enc_rep = torch.matmul(mult, enc_out)
if mel_max_len is not None:
enc_rep = enc_rep[:, :mel_max_len]
dec_lens = torch.clamp_max(dec_lens, mel_max_len)
return enc_rep, dec_lens
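# Usage sketch (illustrative; the tensors below are made-up assumptions, not values
# taken from the training pipeline):
#
#   durs = torch.tensor([[2, 1, 3]])               # (B=1, T_text=3) integer durations
#   enc = torch.randn(1, 3, 384)                   # (B, T_text, C) encoder outputs
#   rep, lens = regulate_len(durs, enc, pace=1.0)
#   # rep: (1, 6, 384) -- frames 0-1 copy symbol 0, frame 2 symbol 1, frames 3-5 symbol 2
#   # lens: tensor([6])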
def average_pitch(pitch, durs):
durs_cums_ends = torch.cumsum(durs, dim=1).long()
durs_cums_starts = F.pad(durs_cums_ends[:, :-1], (1, 0))
pitch_nonzero_cums = F.pad(torch.cumsum(pitch != 0.0, dim=2), (1, 0))
pitch_cums = F.pad(torch.cumsum(pitch, dim=2), (1, 0))
bs, l = durs_cums_ends.size()
n_formants = pitch.size(1)
dcs = durs_cums_starts[:, None, :].expand(bs, n_formants, l)
dce = durs_cums_ends[:, None, :].expand(bs, n_formants, l)
pitch_sums = (torch.gather(pitch_cums, 2, dce)
- torch.gather(pitch_cums, 2, dcs)).float()
pitch_nelems = (torch.gather(pitch_nonzero_cums, 2, dce)
- torch.gather(pitch_nonzero_cums, 2, dcs)).float()
pitch_avg = torch.where(pitch_nelems == 0.0, pitch_nelems,
pitch_sums / pitch_nelems)
return pitch_avg
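# Usage sketch (illustrative; the numbers are assumptions). Zero-valued (unvoiced)
# frames are excluded from each per-symbol average:
#
#   pitch = torch.tensor([[[100., 110., 0., 130., 150., 170.]]])  # (B, n_formants=1, T_mel)
#   durs = torch.tensor([[2, 1, 3]])                              # (B, T_text)
#   average_pitch(pitch, durs)
#   # -> tensor([[[105., 0., 150.]]])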
class TemporalPredictor(nn.Module):
"""Predicts a single float per each temporal location"""
def __init__(self, input_size, filter_size, kernel_size, dropout,
n_layers=2, n_predictions=1):
super(TemporalPredictor, self).__init__()
self.layers = nn.Sequential(*[
ConvReLUNorm(input_size if i == 0 else filter_size, filter_size,
kernel_size=kernel_size, dropout=dropout)
for i in range(n_layers)]
)
self.n_predictions = n_predictions
self.fc = nn.Linear(filter_size, self.n_predictions, bias=True)
def forward(self, enc_out, enc_out_mask):
out = enc_out * enc_out_mask
out = self.layers(out.transpose(1, 2)).transpose(1, 2)
out = self.fc(out) * enc_out_mask
return out
class FastPitch(nn.Module):
def __init__(self, n_mel_channels, n_symbols, padding_idx,
symbols_embedding_dim, in_fft_n_layers, in_fft_n_heads,
in_fft_d_head,
in_fft_conv1d_kernel_size, in_fft_conv1d_filter_size,
in_fft_output_size,
p_in_fft_dropout, p_in_fft_dropatt, p_in_fft_dropemb,
out_fft_n_layers, out_fft_n_heads, out_fft_d_head,
out_fft_conv1d_kernel_size, out_fft_conv1d_filter_size,
out_fft_output_size,
p_out_fft_dropout, p_out_fft_dropatt, p_out_fft_dropemb,
dur_predictor_kernel_size, dur_predictor_filter_size,
p_dur_predictor_dropout, dur_predictor_n_layers,
pitch_predictor_kernel_size, pitch_predictor_filter_size,
p_pitch_predictor_dropout, pitch_predictor_n_layers,
pitch_embedding_kernel_size,
energy_conditioning,
energy_predictor_kernel_size, energy_predictor_filter_size,
p_energy_predictor_dropout, energy_predictor_n_layers,
energy_embedding_kernel_size,
n_speakers, speaker_emb_weight, pitch_conditioning_formants=1):
super(FastPitch, self).__init__()
self.encoder = FFTransformer(
n_layer=in_fft_n_layers, n_head=in_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=in_fft_d_head,
d_inner=in_fft_conv1d_filter_size,
kernel_size=in_fft_conv1d_kernel_size,
dropout=p_in_fft_dropout,
dropatt=p_in_fft_dropatt,
dropemb=p_in_fft_dropemb,
embed_input=True,
d_embed=symbols_embedding_dim,
n_embed=n_symbols,
padding_idx=padding_idx)
if n_speakers > 1:
self.speaker_emb = nn.Embedding(n_speakers, symbols_embedding_dim)
else:
self.speaker_emb = None
self.speaker_emb_weight = speaker_emb_weight
self.duration_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=dur_predictor_filter_size,
kernel_size=dur_predictor_kernel_size,
dropout=p_dur_predictor_dropout, n_layers=dur_predictor_n_layers
)
self.decoder = FFTransformer(
n_layer=out_fft_n_layers, n_head=out_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=out_fft_d_head,
d_inner=out_fft_conv1d_filter_size,
kernel_size=out_fft_conv1d_kernel_size,
dropout=p_out_fft_dropout,
dropatt=p_out_fft_dropatt,
dropemb=p_out_fft_dropemb,
embed_input=False,
d_embed=symbols_embedding_dim
)
self.pitch_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=pitch_predictor_filter_size,
kernel_size=pitch_predictor_kernel_size,
dropout=p_pitch_predictor_dropout, n_layers=pitch_predictor_n_layers,
n_predictions=pitch_conditioning_formants
)
self.pitch_emb = nn.Conv1d(
pitch_conditioning_formants, symbols_embedding_dim,
kernel_size=pitch_embedding_kernel_size,
padding=int((pitch_embedding_kernel_size - 1) / 2))
# Store values precomputed for training data within the model
self.register_buffer('pitch_mean', torch.zeros(1))
self.register_buffer('pitch_std', torch.zeros(1))
self.energy_conditioning = energy_conditioning
if energy_conditioning:
self.energy_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=energy_predictor_filter_size,
kernel_size=energy_predictor_kernel_size,
dropout=p_energy_predictor_dropout,
n_layers=energy_predictor_n_layers,
n_predictions=1
)
self.energy_emb = nn.Conv1d(
1, symbols_embedding_dim,
kernel_size=energy_embedding_kernel_size,
padding=int((energy_embedding_kernel_size - 1) / 2))
self.proj = nn.Linear(out_fft_output_size, n_mel_channels, bias=True)
self.attention = ConvAttention(
n_mel_channels, 0, symbols_embedding_dim,
use_query_proj=True, align_query_enc_type='3xconv')
def binarize_attention(self, attn, in_lens, out_lens):
"""For training purposes only. Binarizes attention with MAS.
        These will no longer receive a gradient.
Args:
attn: B x 1 x max_mel_len x max_text_len
"""
b_size = attn.shape[0]
with torch.no_grad():
attn_out_cpu = np.zeros(attn.data.shape, dtype=np.float32)
log_attn_cpu = torch.log(attn.data).to(device='cpu', dtype=torch.float32)
log_attn_cpu = log_attn_cpu.numpy()
out_lens_cpu = out_lens.cpu()
in_lens_cpu = in_lens.cpu()
for ind in range(b_size):
hard_attn = mas_width1(
log_attn_cpu[ind, 0, :out_lens_cpu[ind], :in_lens_cpu[ind]])
attn_out_cpu[ind, 0, :out_lens_cpu[ind], :in_lens_cpu[ind]] = hard_attn
attn_out = torch.tensor(
attn_out_cpu, device=attn.get_device(), dtype=attn.dtype)
return attn_out
def binarize_attention_parallel(self, attn, in_lens, out_lens):
"""For training purposes only. Binarizes attention with MAS.
        These will no longer receive a gradient.
Args:
attn: B x 1 x max_mel_len x max_text_len
"""
with torch.no_grad():
log_attn_cpu = torch.log(attn.data).cpu().numpy()
attn_out = b_mas(log_attn_cpu, in_lens.cpu().numpy(),
out_lens.cpu().numpy(), width=1)
return torch.from_numpy(attn_out).to(attn.get_device())
def forward(self, inputs, use_gt_pitch=True, pace=1.0, max_duration=75):
(inputs, input_lens, mel_tgt, mel_lens, pitch_dense, energy_dense,
speaker, attn_prior, audiopaths) = inputs
text_max_len = inputs.size(1)
mel_max_len = mel_tgt.size(2)
# Calculate speaker embedding
if self.speaker_emb is None:
spk_emb = 0
else:
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
spk_emb.mul_(self.speaker_emb_weight)
# Input FFT
enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb)
# Predict durations
log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1)
dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, max_duration)
# Predict pitch
pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1)
# Alignment
text_emb = self.encoder.word_emb(inputs)
# make sure to do the alignments before folding
attn_mask = mask_from_lens(input_lens, max_len=text_max_len)
attn_mask = attn_mask[..., None] == 0
# attn_mask should be 1 for unused timesteps in the text_enc_w_spkvec tensor
attn_soft, attn_logprob = self.attention(
mel_tgt, text_emb.permute(0, 2, 1), mel_lens, attn_mask,
key_lens=input_lens, keys_encoded=enc_out, attn_prior=attn_prior)
attn_hard = self.binarize_attention(attn_soft, input_lens, mel_lens)
# Viterbi --> durations
attn_hard_dur = attn_hard.sum(2)[:, 0, :]
dur_tgt = attn_hard_dur
assert torch.all(torch.eq(dur_tgt.sum(dim=1), mel_lens))
# Average pitch over characters
pitch_tgt = average_pitch(pitch_dense, dur_tgt)
if use_gt_pitch and pitch_tgt is not None:
pitch_emb = self.pitch_emb(pitch_tgt)
else:
pitch_emb = self.pitch_emb(pitch_pred)
enc_out = enc_out + pitch_emb.transpose(1, 2)
# Predict energy
if self.energy_conditioning:
energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1)
# Average energy over characters
energy_tgt = average_pitch(energy_dense.unsqueeze(1), dur_tgt)
energy_tgt = torch.log(1.0 + energy_tgt)
energy_emb = self.energy_emb(energy_tgt)
energy_tgt = energy_tgt.squeeze(1)
enc_out = enc_out + energy_emb.transpose(1, 2)
else:
energy_pred = None
energy_tgt = None
len_regulated, dec_lens = regulate_len(
dur_tgt, enc_out, pace, mel_max_len)
# Output FFT
dec_out, dec_mask = self.decoder(len_regulated, dec_lens)
mel_out = self.proj(dec_out)
return (mel_out, dec_mask, dur_pred, log_dur_pred, pitch_pred,
pitch_tgt, energy_pred, energy_tgt, attn_soft, attn_hard,
attn_hard_dur, attn_logprob)
def infer(self, inputs, pace=1.0, dur_tgt=None, pitch_tgt=None,
energy_tgt=None, pitch_transform=None, max_duration=75,
speaker=0):
if self.speaker_emb is None:
spk_emb = 0
else:
speaker = (torch.ones(inputs.size(0)).long().to(inputs.device)
* speaker)
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
spk_emb.mul_(self.speaker_emb_weight)
# Input FFT
enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb)
# Predict durations
log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1)
dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, max_duration)
# Pitch over chars
pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1)
if pitch_transform is not None:
if self.pitch_std[0] == 0.0:
# XXX LJSpeech-1.1 defaults
mean, std = 218.14, 67.24
else:
mean, std = self.pitch_mean[0], self.pitch_std[0]
pitch_pred = pitch_transform(pitch_pred, enc_mask.sum(dim=(1,2)),
mean, std)
if pitch_tgt is None:
pitch_emb = self.pitch_emb(pitch_pred).transpose(1, 2)
else:
pitch_emb = self.pitch_emb(pitch_tgt).transpose(1, 2)
enc_out = enc_out + pitch_emb
# Predict energy
if self.energy_conditioning:
if energy_tgt is None:
energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1)
energy_emb = self.energy_emb(energy_pred.unsqueeze(1)).transpose(1, 2)
else:
energy_emb = self.energy_emb(energy_tgt).transpose(1, 2)
enc_out = enc_out + energy_emb
else:
energy_pred = None
len_regulated, dec_lens = regulate_len(
dur_pred if dur_tgt is None else dur_tgt,
enc_out, pace, mel_max_len=None)
dec_out, dec_mask = self.decoder(len_regulated, dec_lens)
mel_out = self.proj(dec_out)
# mel_lens = dec_mask.squeeze(2).sum(axis=1).long()
mel_out = mel_out.permute(0, 2, 1) # For inference.py
return mel_out, dec_lens, dur_pred, pitch_pred, energy_pred
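# Inference usage sketch (illustrative; the constructor arguments are omitted and the
# token ids below are placeholders, not a real text encoding):
#
#   model = FastPitch(...).cuda().eval()            # arguments come from the training config
#   text = torch.randint(1, 100, (1, 20)).cuda()    # (B, T_text) symbol ids
#   with torch.no_grad():
#       mel, mel_lens, dur, pitch, energy = model.infer(text, pace=1.0)
#   # mel: (B, n_mel_channels, T_mel), ready for a vocoder such as HiFi-GAN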
|
PyTorch/SpeechSynthesis/HiFiGAN/hifigan | hifigan | data_function | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# mel_spectrogram, MelDataset
import math
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
from librosa.util import normalize
from numpy import random
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from common.audio_processing import dynamic_range_compression
from common.utils import load_filepaths_and_text, load_wav
MAX_WAV_VALUE = 32768.0
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size,
fmin, fmax, center=False):
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
fmax_key = f'{fmax}_{y.device}'
if fmax_key not in mel_basis:
mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels,
fmin=fmin, fmax=fmax)
mel_basis[fmax_key] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
pad = int((n_fft-hop_size)/2)
y = F.pad(y.unsqueeze(1), (pad, pad), mode='reflect')
y = y.squeeze(1)
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size,
window=hann_window[str(y.device)], center=center,
pad_mode='reflect', normalized=False, onesided=True,
return_complex=True)
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)
spec = dynamic_range_compression(spec) # spectral normalize
return spec
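# Usage sketch (illustrative; the 22.05 kHz settings below mirror common LJSpeech-style
# configs but are assumptions here, not values read from this repository):
#
#   y = torch.zeros(1, 22050)   # (B, T) waveform in [-1, 1]
#   mel = mel_spectrogram(y, n_fft=1024, num_mels=80, sampling_rate=22050,
#                         hop_size=256, win_size=1024, fmin=0, fmax=8000)
#   # mel: (1, 80, T_frames) log-compressed mel spectrogram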
class MelDataset(torch.utils.data.Dataset):
def __init__(self, training_files, segment_size, n_fft, num_mels,
hop_size, win_size, sampling_rate, fmin, fmax, split=True,
device=None, fmax_loss=None, fine_tuning=False,
base_mels_path=None, repeat=1, deterministic=False,
max_wav_value=MAX_WAV_VALUE):
self.audio_files = training_files
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.max_wav_value = max_wav_value
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
self.repeat = repeat
self.deterministic = deterministic
self.rng = random.default_rng()
def __getitem__(self, index):
if index >= len(self):
raise IndexError('Dataset index out of range')
rng = random.default_rng(index) if self.deterministic else self.rng
index = index % len(self.audio_files) # collapse **after** setting seed
filename = self.audio_files[index]
audio, sampling_rate = load_wav(filename)
audio = audio / self.max_wav_value
if not self.fine_tuning:
audio = normalize(audio) * 0.95
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if not self.fine_tuning:
if self.split:
if audio.size(1) >= self.segment_size:
max_audio_start = audio.size(1) - self.segment_size
audio_start = rng.integers(0, max_audio_start)
audio = audio[:, audio_start:audio_start+self.segment_size]
else:
audio = F.pad(audio, (0, self.segment_size - audio.size(1)))
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size,
self.win_size, self.fmin, self.fmax,
center=False)
else:
mel = np.load(
os.path.join(self.base_mels_path,
os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))
mel = torch.from_numpy(mel).float()
if len(mel.shape) < 3:
mel = mel.unsqueeze(0)
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = rng.integers(0, mel.size(2) - frames_per_seg)
mel = mel[:, :, mel_start:mel_start + frames_per_seg]
a = mel_start * self.hop_size
b = (mel_start + frames_per_seg) * self.hop_size
audio = audio[:, a:b]
else:
mel = F.pad(mel, (0, frames_per_seg - mel.size(2)))
audio = F.pad(audio, (0, self.segment_size - audio.size(1)))
mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size,
self.win_size, self.fmin, self.fmax_loss,
center=False)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
def __len__(self):
return len(self.audio_files) * self.repeat
def get_data_loader(args, distributed_run, train=True, batch_size=None,
val_kwargs=None):
filelists = args.training_files if train else args.validation_files
files = load_filepaths_and_text(args.dataset_path, filelists)
files = list(zip(*files))[0]
dataset_kw = {
'segment_size': args.segment_size,
'n_fft': args.filter_length,
'num_mels': args.num_mels,
'hop_size': args.hop_length,
'win_size': args.win_length,
'sampling_rate': args.sampling_rate,
'fmin': args.mel_fmin,
'fmax': args.mel_fmax,
'fmax_loss': args.mel_fmax_loss,
'max_wav_value': args.max_wav_value,
'fine_tuning': args.fine_tuning,
'base_mels_path': args.input_mels_dir,
'deterministic': not train
}
if train:
dataset = MelDataset(files, **dataset_kw)
sampler = DistributedSampler(dataset) if distributed_run else None
else:
dataset_kw.update(val_kwargs or {})
dataset = MelDataset(files, **dataset_kw)
sampler = (DistributedSampler(dataset, shuffle=False)
if distributed_run else None)
loader = DataLoader(dataset,
num_workers=args.num_workers if train else 1,
shuffle=(train and not distributed_run),
sampler=sampler,
batch_size=batch_size or args.batch_size,
pin_memory=True,
persistent_workers=True,
drop_last=train)
return loader
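# Usage sketch (illustrative; `args` is assumed to be the training argparse namespace
# carrying the fields referenced above, e.g. dataset_path, training_files, segment_size,
# filter_length, num_mels, hop_length, win_length, sampling_rate, num_workers, batch_size):
#
#   train_loader = get_data_loader(args, distributed_run=False, train=True)
#   mel, audio, filenames, mel_loss = next(iter(train_loader))
#   # mel: (batch, num_mels, frames), audio: (batch, segment_size)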
|
CUDA-Optimized/FastSpeech/fastspeech/hparams | hparams | train | # Inheritance
parent_yaml: "base.yaml"
# Data
meta_file: "metadata_train.csv"
# Train
n_workers: 16 # Num of workers used in data loader.
learning_rate: 0.001 # Learning rate.
warmup_steps: 4000 # Num of warmup steps.
batch_size: 16 # Batch size.
final_steps: 320000 # Num of steps to the end.
save_step: 1000 # Step interval in checkpointing.
log_step: 1 # Step interval in logging on command-line and Tensorboard.
use_amp: False # Usage of AMP.
pyprof_enabled: False # Usage of pyprof. Enable it only for profiling use. |
TensorFlow/LanguageModeling/Transformer-XL/tf/scripts | scripts | inference_benchmark | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
BATCH_SIZES=(1 2 4 8 16 32)
# "empty" MATH corresponds to fp32
MATHS=("" "--amp")
for (( j = 0; j < ${#BATCH_SIZES[@]}; j++ )); do
for (( k = 0; k < ${#MATHS[@]}; k++ )); do
echo batch size: ${BATCH_SIZES[j]} math: ${MATHS[k]}
taskset -c 0 bash run_wt103_base.sh eval \
--eval_batch_size "${BATCH_SIZES[j]}" \
"${MATHS[k]}" \
"${@:1}"
done
done
|
PyTorch/LanguageModeling/BERT/triton/large/scripts/docker | docker | interactive | #!/usr/bin/env bash
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:=0}
docker run -it --rm \
--runtime=nvidia \
-e NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES} \
--net=host \
--shm-size=1g \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
--ipc=host \
-e WORKDIR="$(pwd)" \
-e PYTHONPATH="$(pwd)" \
-v "$(pwd)":"$(pwd)" \
-v /var/run/docker.sock:/var/run/docker.sock \
-w "$(pwd)" \
bert:latest bash
|
PyTorch/SpeechSynthesis/FastPitch/triton | triton | fastpitch_tensor_info | tensor_names:
inputs: [text_padded, input_lengths]
outputs: [mel_padded, output_lengths, dur_padded, pitch_padded]
|
PyTorch/SpeechRecognition/Jasper/common/dali | dali | __init__ | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
CUDA-Optimized/FastSpeech/fastspeech/trt | trt | verify_trt | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pprint
import fire
import torch
from fastspeech import DEFAULT_DEVICE
from fastspeech import hparam as hp
from fastspeech.data_load import PadDataLoader
from fastspeech.dataset.text_dataset import TextDataset
from fastspeech.inferencer.fastspeech_inferencer import FastSpeechInferencer
from fastspeech.model.fastspeech import Fastspeech
from fastspeech.trt.fastspeech_trt_inferencer import FastSpeechTRTInferencer
from fastspeech.utils.logging import tprint
from fastspeech.utils.pytorch import to_cpu_numpy
from collections import OrderedDict
import sys
import numpy as np
from torch.nn import functional as F
# import multiprocessing
# multiprocessing.set_start_method('spawn', True)
pp = pprint.PrettyPrinter(indent=4, width=1000)
np.set_printoptions(threshold=sys.maxsize)
SAMPLE_TEXT = "the more you buy, the more you save."
def verify(hparam="trt.yaml",
text=SAMPLE_TEXT,
**kwargs):
hp.set_hparam(hparam, kwargs)
tprint("Hparams:\n{}".format(pp.pformat(hp)))
tprint("Device count: {}".format(torch.cuda.device_count()))
outs_trt, acts_trt = infer_trt(text)
outs, acts = infer_pytorch(text)
both, pytorch, trt = join_dict(acts, acts_trt)
# print diff
print("## Diff ##\n\n")
for name, (act, act_trt) in both.items():
act = act.float()
act_trt = act_trt.float()
diff = act.reshape(-1) - act_trt.reshape(-1)
is_identical = diff.eq(0).all()
errors = diff[diff.ne(0)]
max_error = torch.max(torch.abs(errors)) if len(errors) > 0 else 0
print("# {} #\n\n[PyTorch]\n{}\n\n[TRT]: \n{}\n\n[Diff]: \n{}\n\n[Errors]: \n{}\n- identical? {}\n- {} errors out of {}\n- max: {}\n\n".format(name,
act,
act_trt,
diff,
errors,
is_identical,
len(errors),
len(diff),
max_error,
))
# print("## PyTorch ##\n\n")
# for name, act in pytorch.items():
# print("[{}]\npytorch:\n{}\n\n".format(name, act))
# print("## TRT ##\n\n")
# for name, act in trt.items():
# print("[{}]\ttrt:\n{}\n\n".format(name, act_trt))
def join_dict(acts, acts_trt):
both = dict()
left = dict()
right = dict()
for k in acts:
if k in acts_trt:
both[k] = (acts[k], acts_trt[k])
else:
left[k] = acts[k]
for k in acts_trt:
if k not in acts:
right[k] = acts_trt[k]
return both, left, right
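# Usage sketch (illustrative):
#
#   both, only_pt, only_trt = join_dict({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
#   # both == {'b': (2, 3)}, only_pt == {'a': 1}, only_trt == {'c': 4}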
def infer_trt(text):
# model
model = Fastspeech(
max_seq_len=hp.max_seq_len,
d_model=hp.d_model,
phoneme_side_n_layer=hp.phoneme_side_n_layer,
phoneme_side_head=hp.phoneme_side_head,
phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
phoneme_side_output_size=hp.phoneme_side_output_size,
mel_side_n_layer=hp.mel_side_n_layer,
mel_side_head=hp.mel_side_head,
mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
mel_side_output_size=hp.mel_side_output_size,
duration_predictor_filter_size=hp.duration_predictor_filter_size,
duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
fft_conv1d_kernel=hp.fft_conv1d_kernel,
fft_conv1d_padding=hp.fft_conv1d_padding,
dropout=hp.dropout,
n_mels=hp.num_mels,
fused_layernorm=hp.fused_layernorm
)
# dataset
dataset = TextDataset([text for _ in range(hp.batch_size)])
data_loader = PadDataLoader(dataset,
batch_size=hp.batch_size,
num_workers=hp.n_workers,
drop_last=False)
# inferencer
inferencer = FastSpeechTRTInferencer('fastspeech',
model,
data_loader=data_loader,
ckpt_path=hp.checkpoint_path,
trt_max_ws_size=hp.trt_max_ws_size,
trt_file_path=hp.trt_file_path,
trt_force_build=hp.trt_force_build,
use_fp16=hp.use_fp16,
trt_max_input_seq_len=hp.trt_max_input_seq_len,
trt_max_output_seq_len=hp.trt_max_output_seq_len,
validate_accuracy=True,
)
with inferencer:
acts = dict()
outs = inferencer.infer(acts=acts)
return outs, acts
def infer_pytorch(text):
# model
model = Fastspeech(
max_seq_len=hp.max_seq_len,
d_model=hp.d_model,
phoneme_side_n_layer=hp.phoneme_side_n_layer,
phoneme_side_head=hp.phoneme_side_head,
phoneme_side_conv1d_filter_size=hp.phoneme_side_conv1d_filter_size,
phoneme_side_output_size=hp.phoneme_side_output_size,
mel_side_n_layer=hp.mel_side_n_layer,
mel_side_head=hp.mel_side_head,
mel_side_conv1d_filter_size=hp.mel_side_conv1d_filter_size,
mel_side_output_size=hp.mel_side_output_size,
duration_predictor_filter_size=hp.duration_predictor_filter_size,
duration_predictor_kernel_size=hp.duration_predictor_kernel_size,
fft_conv1d_kernel=hp.fft_conv1d_kernel,
fft_conv1d_padding=hp.fft_conv1d_padding,
dropout=hp.dropout,
n_mels=hp.num_mels,
fused_layernorm=hp.fused_layernorm
)
# dataset
dataset = TextDataset([text for _ in range(hp.batch_size)])
data_loader = PadDataLoader(dataset,
batch_size=hp.batch_size,
num_workers=hp.n_workers,
drop_last=False)
# inferencer
with torch.no_grad():
inferencer = FastSpeechInferencer('fastspeech',
model,
data_loader=data_loader,
ckpt_path=hp.checkpoint_path,
device='cuda',
use_fp16=hp.use_fp16,
)
acts = dict()
outs = inferencer.infer(acts=acts,
seq_input_len=hp.trt_max_input_seq_len,
seq_output_len=hp.trt_max_output_seq_len)
return outs, acts
if __name__ == '__main__':
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
fire.Fire(verify)
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/docker/containers | containers | triton_server_container | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pathlib
from threading import Thread
from typing import Dict, Generator, Union
from docker.models.containers import ExecResult
from docker.types import DeviceRequest, Ulimit
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ....logger import LOGGER
from ...exceptions import ContainerNotStarted
from ..container import DockerContainer
class TritonServerContainer(DockerContainer):
def __init__(
self,
name: str,
command: str,
image: str,
volumes: Dict,
devices: Union[list, int],
environment: Dict,
log_file: Union[pathlib.Path, str],
network: str = "host",
shm_size: str = "1G",
):
"""
Initialize Triton Server Container
Args:
name: Container name
command: Triton Server command to exec on container start
image: Docker Image
volumes: Volumes to mount inside container
devices: Devices which has to be visible in container
environment: Environment variables
log_file: Path where logs should be saved
network: Network mode
shm_size: Shared memory size
"""
super().__init__(name)
self._image = image
self._command = command
self._volumes = volumes
self._devices = devices
self._environment = environment
self._network = network
self._shm_size = shm_size
self._triton_exec = None
self._logging_thread = None
self._log_file_path = pathlib.Path(log_file)
def start(self) -> None:
"""
Start Triton Server Container
"""
devices = [
DeviceRequest(capabilities=[["gpu"]], device_ids=self._devices),
]
LOGGER.info(f"Triton environment: {json.dumps(self._environment, indent=4)}")
LOGGER.info(f"Starting Triton container {self.name}.")
self._container = self._docker_client.containers.run(
image=self._image,
name=self.name,
device_requests=devices,
detach=True,
tty=True,
shm_size=self._shm_size,
ulimits=[
Ulimit(name="memlock", soft=-1, hard=-1),
Ulimit(name="stack", soft=67108864, hard=67108864),
],
volumes=self._volumes,
environment=self._environment,
network_mode=self._network,
auto_remove=True,
ipc_mode="host",
)
LOGGER.info(f"Triton command:")
LOGGER.info(f" {self._command}")
LOGGER.info(f"Starting Triton Server {self.name}.")
self._triton_exec = self._docker_api_client.exec_create(
container=self._container.id,
cmd=self._command,
)
stream_generator = self._docker_api_client.exec_start(exec_id=self._triton_exec["Id"], stream=True)
self._logging_thread = Thread(target=TritonServerContainer._logging, args=(self, stream_generator), daemon=True)
self._logging_thread.start()
def stop(self) -> None:
"""
Stop Triton Server Container and save logs to file
"""
if self._container is not None:
triton_result = self._docker_api_client.exec_inspect(self._triton_exec["Id"])
if triton_result.get("ExitCode") not in (0, None):
LOGGER.info(
f"Triton Inference Server instance {self.name} failed. Exit code: {triton_result.get('ExitCode')}"
)
LOGGER.info(f"Stopping triton server {self.name}.")
self._container.stop()
self._container = None
self._docker_client.close()
self._docker_api_client.close()
def run(self, command: str) -> ExecResult:
"""
Run command in container
Args:
command: Command to execute
Returns:
ExecResult
"""
if not self._container:
raise ContainerNotStarted("Triton Server Container is not running. Use .start() first.")
return self._container.exec_run(command)
def _logging(self, generator: Generator) -> None:
"""Triton logging thread for Triton Inference Server
Args:
generator (string generator): Triton log stream.
"""
with open(self._log_file_path, mode="w") as file:
try:
while True:
log = next(generator)
txt = log.decode("utf-8")
file.write(txt)
except StopIteration:
LOGGER.info(f"Saving Triton Inference Server {self.name} logs in {self._log_file_path}.")
|
TensorFlow/Detection/SSD/models/research/object_detection/core | core | batcher | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides functions to batch a dictionary of input tensors."""
import collections
import tensorflow as tf
from object_detection.core import prefetcher
rt_shape_str = '_runtime_shapes'
class BatchQueue(object):
"""BatchQueue class.
This class creates a batch queue to asynchronously enqueue tensors_dict.
It also adds a FIFO prefetcher so that the batches are readily available
for the consumers. Dequeue ops for a BatchQueue object can be created via
the Dequeue method which evaluates to a batch of tensor_dict.
Example input pipeline with batching:
------------------------------------
key, string_tensor = slim.parallel_reader.parallel_read(...)
tensor_dict = decoder.decode(string_tensor)
tensor_dict = preprocessor.preprocess(tensor_dict, ...)
batch_queue = batcher.BatchQueue(tensor_dict,
batch_size=32,
batch_queue_capacity=2000,
num_batch_queue_threads=8,
prefetch_queue_capacity=20)
tensor_dict = batch_queue.dequeue()
outputs = Model(tensor_dict)
...
-----------------------------------
Notes:
-----
This class batches tensors of unequal sizes by zero padding and unpadding
them after generating a batch. This can be computationally expensive when
batching tensors (such as images) that are of vastly different sizes. So it is
recommended that the shapes of such tensors be fully defined in tensor_dict
while other lightweight tensors such as bounding box corners and class labels
can be of varying sizes. Use either crop or resize operations to fully define
the shape of an image in tensor_dict.
It is also recommended to perform any preprocessing operations on tensors
before passing to BatchQueue and subsequently calling the Dequeue method.
Another caveat is that this class does not read the last batch if it is not
full. The current implementation makes it hard to support that use case. So,
for evaluation, when it is critical to run all the examples through your
network use the input pipeline example mentioned in core/prefetcher.py.
"""
def __init__(self, tensor_dict, batch_size, batch_queue_capacity,
num_batch_queue_threads, prefetch_queue_capacity):
"""Constructs a batch queue holding tensor_dict.
Args:
tensor_dict: dictionary of tensors to batch.
batch_size: batch size.
batch_queue_capacity: max capacity of the queue from which the tensors are
batched.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: max capacity of the queue used to prefetch
assembled batches.
"""
# Remember static shapes to set shapes of batched tensors.
static_shapes = collections.OrderedDict(
{key: tensor.get_shape() for key, tensor in tensor_dict.items()})
# Remember runtime shapes to unpad tensors after batching.
runtime_shapes = collections.OrderedDict(
{(key + rt_shape_str): tf.shape(tensor)
for key, tensor in tensor_dict.items()})
all_tensors = tensor_dict
all_tensors.update(runtime_shapes)
batched_tensors = tf.train.batch(
all_tensors,
capacity=batch_queue_capacity,
batch_size=batch_size,
dynamic_pad=True,
num_threads=num_batch_queue_threads)
self._queue = prefetcher.prefetch(batched_tensors,
prefetch_queue_capacity)
self._static_shapes = static_shapes
self._batch_size = batch_size
def dequeue(self):
"""Dequeues a batch of tensor_dict from the BatchQueue.
TODO: use allow_smaller_final_batch to allow running over the whole eval set
Returns:
A list of tensor_dicts of the requested batch_size.
"""
batched_tensors = self._queue.dequeue()
# Separate input tensors from tensors containing their runtime shapes.
tensors = {}
shapes = {}
for key, batched_tensor in batched_tensors.items():
unbatched_tensor_list = tf.unstack(batched_tensor)
for i, unbatched_tensor in enumerate(unbatched_tensor_list):
if rt_shape_str in key:
shapes[(key[:-len(rt_shape_str)], i)] = unbatched_tensor
else:
tensors[(key, i)] = unbatched_tensor
# Undo that padding using shapes and create a list of size `batch_size` that
# contains tensor dictionaries.
tensor_dict_list = []
batch_size = self._batch_size
for batch_id in range(batch_size):
tensor_dict = {}
for key in self._static_shapes:
tensor_dict[key] = tf.slice(tensors[(key, batch_id)],
tf.zeros_like(shapes[(key, batch_id)]),
shapes[(key, batch_id)])
tensor_dict[key].set_shape(self._static_shapes[key])
tensor_dict_list.append(tensor_dict)
return tensor_dict_list
|
TensorFlow/Segmentation/UNet_3D_Medical/runtime | runtime | hooks | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Hooks for metric collection and benchmarking """
import time
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
def get_hooks(params, logger):
""" Get the appropriate set of hooks given the configuration
:param params: Dict with additional parameters
:param logger: Logger object
:return: Set of hooks
"""
hooks = []
if params.exec_mode == 'debug_train':
return get_debug_training_hooks(logger, params)
if params.exec_mode == 'debug_predict':
return get_debug_predict_hooks(logger, params)
if 'train' in params.exec_mode:
return get_training_hooks(logger, params)
if params.exec_mode == 'predict':
return get_predict_hooks(logger, params)
return hooks
def get_debug_predict_hooks(logger, params):
""" Return hooks for debugging prediction
:param logger: Logger object
:param params: Dict with additional parameters
:return: Estimator hooks
"""
hooks = []
if hvd.rank() == 0:
hooks += [ProfilingHook(warmup_steps=params.warmup_steps,
global_batch_size=params.batch_size,
logger=logger,
mode='inference')]
return hooks
def get_debug_training_hooks(logger, params):
""" Return hooks for debugging training
:param logger: Logger object
:param params: Dict with additional parameters
:return: Estimator hooks
"""
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
if hvd.rank() == 0:
hooks += [TrainingHook(log_every=params.log_every,
logger=logger,
tensor_names=['total_loss_ref:0']),
ProfilingHook(warmup_steps=params.warmup_steps,
global_batch_size=hvd.size() * params.batch_size,
logger=logger,
mode='train')]
return hooks
def get_predict_hooks(logger, params):
""" Return hooks for prediction
:param logger: Logger object
:param params: Dict with additional parameters
:return: Estimator hooks
"""
hooks = []
if hvd.rank() == 0:
if params.benchmark:
hooks = [ProfilingHook(warmup_steps=params.warmup_steps,
global_batch_size=params.batch_size,
logger=logger,
mode='test')]
return hooks
def get_training_hooks(logger, params):
""" Return hooks for training
:param logger: Logger object
:param params: Dict with additional parameters
:return: Estimator hooks
"""
hooks = [hvd.BroadcastGlobalVariablesHook(0)]
if hvd.rank() == 0:
hooks += [OomReportingHook()]
if params.benchmark:
hooks += [ProfilingHook(warmup_steps=params.warmup_steps,
global_batch_size=hvd.size() * params.batch_size,
logger=logger,
mode='train')]
else:
hooks += [TrainingHook(log_every=params.log_every,
logger=logger,
tensor_names=['total_loss_ref:0'])]
return hooks
class ProfilingHook(tf.estimator.SessionRunHook):
""" Hook for profiling metrics """
def __init__(self, warmup_steps, global_batch_size, logger, mode):
""" Build hook
:param warmup_steps: Number of steps to skip initially
:param global_batch_size: Number of samples per bach in all gpus
:param logger: Logger object
:param mode: Estimator's execution mode
"""
self._warmup_steps = warmup_steps
self._global_batch_size = global_batch_size
self._step = 0
self._timestamps = []
self._logger = logger
self._mode = mode
def before_run(self, _):
""" Execute before run """
self._step += 1
if self._step >= self._warmup_steps:
self._timestamps.append(time.time())
def end(self, _):
""" Execute on completion """
deltas = np.array([self._timestamps[i + 1] - self._timestamps[i] for i in range(len(self._timestamps) - 1)])
stats = process_performance_stats(np.array(deltas),
self._global_batch_size,
self._mode)
self._logger.log(step=(), data=stats)
self._logger.flush()
class TrainingHook(tf.estimator.SessionRunHook):
""" Hook for training metrics """
def __init__(self, log_every, logger, tensor_names):
""" Build hook for training
:param log_every: Logging frequency
:param logger: Logger object
:param tensor_names: Names of the tensors to log
"""
self._log_every = log_every
self._step = 0
self._logger = logger
self._tensor_names = tensor_names
def before_run(self, _):
""" Execute before run """
run_args = tf.compat.v1.train.SessionRunArgs(
fetches=self._tensor_names
)
return run_args
def after_run(self,
_,
run_values):
""" Execute after run
:param run_values: Values to capture
:return:
"""
if self._step % self._log_every == 0:
for i in range(len(self._tensor_names)):
self._logger.log(step=(self._step,), data={self._tensor_names[i]: str(run_values.results[i])})
self._step += 1
def end(self, _):
""" Execute on completion """
self._logger.flush()
class OomReportingHook(tf.estimator.SessionRunHook): # pylint: disable=R0903
""" Report for out of memory errors"""
def before_run(self, _): # pylint: disable=R0201
""" Execute before run """
return tf.estimator.SessionRunArgs(fetches=[], # no extra fetches
options=tf.compat.v1.RunOptions(report_tensor_allocations_upon_oom=True))
def process_performance_stats(timestamps, batch_size, mode):
""" Get confidence intervals
:param timestamps: Collection of timestamps
:param batch_size: Number of samples per batch
:param mode: Estimator's execution mode
:return: Stats
"""
timestamps_ms = 1000 * timestamps
throughput_imgps = (1000.0 * batch_size / timestamps_ms).mean()
stats = {f"throughput_{mode}": throughput_imgps,
f"latency_{mode}_mean": timestamps_ms.mean()}
for level in [90, 95, 99]:
stats.update({f"latency_{mode}_{level}": np.percentile(timestamps_ms, level)})
return stats
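# Usage sketch (illustrative; the step times below are made up):
#
#   deltas = np.array([0.25, 0.26, 0.24, 0.25])   # seconds per step
#   process_performance_stats(deltas, batch_size=2, mode='train')
#   # -> {'throughput_train': ~8 samples/s, 'latency_train_mean': ~250 ms,
#   #     'latency_train_90': ..., 'latency_train_95': ..., 'latency_train_99': ...}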
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner | runner | exceptions | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RunnerException(Exception):
"""
Runner Exception
"""
def __init__(self, message: str):
self._message = message
def __str__(self):
return self._message
@property
def message(self):
"""Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
|
PyTorch/SpeechRecognition/wav2vec2/scripts | scripts | finetune_base_1h | #!/usr/bin/env bash
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -a
# A100 80GiB FP16: UPDATE_FREQ=1
# A100 80GiB TF32: UPDATE_FREQ=1
# IO
: ${DATASET_DIR:="/datasets/LibriSpeech"}
: ${TRAIN_SUBSET:="train-1h"}
: ${OUTPUT_DIR:="results/finetune_base_1h"}
: ${PRETRAINED_MODEL:=results/pretrain_base/wav2vec2_update400000.pt}
# Batching
: ${NUM_GPUS:=8}
: ${MAX_TOKENS:=3200000}
: ${NUM_CONCAT_BATCHES:=1}
: ${UPDATE_FREQ:=1}
# Training
: ${LEARNING_RATE:=0.00005}
: ${FREEZE_FINETUNE_UPDATES:=10000}
: ${MAX_UPDATE:=13000}
: ${MASK_CHANNEL_PROB:=0.25}
: ${MASK_PROB:=0.65}
bash scripts/finetune_vox_960h.sh "$@"
|
Tools/PyTorch/TimeSeriesPredictionPlatform | TimeSeriesPredictionPlatform | hydra_utils | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hydra import compose, initialize
from hydra.core.global_hydra import GlobalHydra
from hydra.core.hydra_config import HydraConfig
from omegaconf import OmegaConf
def get_config(config_name, config_path, override_list=None, return_hydra_config=False):
GlobalHydra.instance().clear()
initialize(config_path=config_path)
cfg = compose(config_name, return_hydra_config=return_hydra_config, overrides=override_list)
if return_hydra_config:
HydraConfig().cfg = cfg
OmegaConf.resolve(cfg)
return cfg
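# Usage sketch (illustrative; the config name, path and override below are assumptions
# about the surrounding config layout, not fixed values):
#
#   cfg = get_config(
#       config_name="train_config",
#       config_path="conf",
#       override_list=["trainer.num_epochs=1"],
#   )
#   print(OmegaConf.to_yaml(cfg))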
|
PyTorch/Classification/ConvNets/efficientnet/inference/TF32 | TF32 | DGXA100_efficientnet-widese-b0_TF32 |
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 1 --workspace ${1:-./} --raport-file raport_1.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 2 --workspace ${1:-./} --raport-file raport_2.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 4 --workspace ${1:-./} --raport-file raport_4.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 8 --workspace ${1:-./} --raport-file raport_8.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 16 --workspace ${1:-./} --raport-file raport_16.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 32 --workspace ${1:-./} --raport-file raport_32.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 64 --workspace ${1:-./} --raport-file raport_64.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 128 --workspace ${1:-./} --raport-file raport_128.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b0 --precision TF32 --mode benchmark_inference --platform DGXA100 /imagenet -b 256 --workspace ${1:-./} --raport-file raport_256.json
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cpu | cpu | ROIAlign_cpu | // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include "cpu/vision.h"
// implementation taken from Caffe2
template <typename T>
struct PreCalc {
int pos1;
int pos2;
int pos3;
int pos4;
T w1;
T w2;
T w3;
T w4;
};
template <typename T>
void pre_calc_for_bilinear_interpolate(
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int iy_upper,
const int ix_upper,
T roi_start_h,
T roi_start_w,
T bin_size_h,
T bin_size_w,
int roi_bin_grid_h,
int roi_bin_grid_w,
std::vector<PreCalc<T>>& pre_calc) {
int pre_calc_index = 0;
for (int ph = 0; ph < pooled_height; ph++) {
for (int pw = 0; pw < pooled_width; pw++) {
for (int iy = 0; iy < iy_upper; iy++) {
const T yy = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < ix_upper; ix++) {
const T xx = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T x = xx;
T y = yy;
// deal with: inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
PreCalc<T> pc;
pc.pos1 = 0;
pc.pos2 = 0;
pc.pos3 = 0;
pc.pos4 = 0;
pc.w1 = 0;
pc.w2 = 0;
pc.w3 = 0;
pc.w4 = 0;
pre_calc[pre_calc_index] = pc;
pre_calc_index += 1;
continue;
}
if (y <= 0) {
y = 0;
}
if (x <= 0) {
x = 0;
}
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
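          // ly/lx are the fractional offsets of the sampling point inside its
          // cell; w1..w4 are the bilinear weights of the four neighbouring
          // pixels (top-left, top-right, bottom-left, bottom-right).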
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
          // save weights and indices
PreCalc<T> pc;
pc.pos1 = y_low * width + x_low;
pc.pos2 = y_low * width + x_high;
pc.pos3 = y_high * width + x_low;
pc.pos4 = y_high * width + x_high;
pc.w1 = w1;
pc.w2 = w2;
pc.w3 = w3;
pc.w4 = w4;
pre_calc[pre_calc_index] = pc;
pre_calc_index += 1;
}
}
}
}
}
template <typename T>
void ROIAlignForward_cpu_kernel(
const int nthreads,
const T* bottom_data,
const T& spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int sampling_ratio,
const T* bottom_rois,
//int roi_cols,
T* top_data) {
//AT_ASSERT(roi_cols == 4 || roi_cols == 5);
int roi_cols = 5;
int n_rois = nthreads / channels / pooled_width / pooled_height;
// (n, c, ph, pw) is an element in the pooled output
// can be parallelized using omp
// #pragma omp parallel for num_threads(32)
for (int n = 0; n < n_rois; n++) {
int index_n = n * channels * pooled_width * pooled_height;
// roi could have 4 or 5 columns
const T* offset_bottom_rois = bottom_rois + n * roi_cols;
int roi_batch_ind = 0;
if (roi_cols == 5) {
roi_batch_ind = offset_bottom_rois[0];
offset_bottom_rois++;
}
    // Do not use rounding; this implementation detail is critical
T roi_start_w = offset_bottom_rois[0] * spatial_scale;
T roi_start_h = offset_bottom_rois[1] * spatial_scale;
T roi_end_w = offset_bottom_rois[2] * spatial_scale;
T roi_end_h = offset_bottom_rois[3] * spatial_scale;
// T roi_start_w = round(offset_bottom_rois[0] * spatial_scale);
// T roi_start_h = round(offset_bottom_rois[1] * spatial_scale);
// T roi_end_w = round(offset_bottom_rois[2] * spatial_scale);
// T roi_end_h = round(offset_bottom_rois[3] * spatial_scale);
// Force malformed ROIs to be 1x1
T roi_width = std::max(roi_end_w - roi_start_w, (T)1.);
T roi_height = std::max(roi_end_h - roi_start_h, (T)1.);
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
    // we want to precalculate indices and weights shared by all channels;
    // this is the key point of the optimization
std::vector<PreCalc<T>> pre_calc(
roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height);
pre_calc_for_bilinear_interpolate(
height,
width,
pooled_height,
pooled_width,
roi_bin_grid_h,
roi_bin_grid_w,
roi_start_h,
roi_start_w,
bin_size_h,
bin_size_w,
roi_bin_grid_h,
roi_bin_grid_w,
pre_calc);
for (int c = 0; c < channels; c++) {
int index_n_c = index_n + c * pooled_width * pooled_height;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
int pre_calc_index = 0;
for (int ph = 0; ph < pooled_height; ph++) {
for (int pw = 0; pw < pooled_width; pw++) {
int index = index_n_c + ph * pooled_width + pw;
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) {
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
PreCalc<T> pc = pre_calc[pre_calc_index];
output_val += pc.w1 * offset_bottom_data[pc.pos1] +
pc.w2 * offset_bottom_data[pc.pos2] +
pc.w3 * offset_bottom_data[pc.pos3] +
pc.w4 * offset_bottom_data[pc.pos4];
pre_calc_index += 1;
}
}
output_val /= count;
top_data[index] = output_val;
} // for pw
} // for ph
} // for c
} // for n
}
at::Tensor ROIAlign_forward_cpu(const at::Tensor& input,
const at::Tensor& rois,
const float spatial_scale,
const int pooled_height,
const int pooled_width,
const int sampling_ratio) {
AT_ASSERTM(!input.is_cuda(), "input must be a CPU tensor");
AT_ASSERTM(!rois.is_cuda(), "rois must be a CPU tensor");
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
if (output.numel() == 0) {
return output;
}
AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "ROIAlign_forward", [&] {
ROIAlignForward_cpu_kernel<scalar_t>(
output_size,
input.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
rois.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>());
});
return output;
}
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | inception_v1_test | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for nets.inception_v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import inception
slim = tf.contrib.slim
class InceptionV1Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'InceptionV1/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('InceptionV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
    mixed_5c, end_points = inception.inception_v1_base(inputs)
    self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
    self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b',
'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c',
'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2',
'Mixed_5b', 'Mixed_5c']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d',
'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b',
'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV1/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points.keys())
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v1_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {'Conv2d_1a_7x7': [5, 112, 112, 64],
'MaxPool_2a_3x3': [5, 56, 56, 64],
'Conv2d_2b_1x1': [5, 56, 56, 64],
'Conv2d_2c_3x3': [5, 56, 56, 192],
'MaxPool_3a_3x3': [5, 28, 28, 192],
'Mixed_3b': [5, 28, 28, 256],
'Mixed_3c': [5, 28, 28, 480],
'MaxPool_4a_3x3': [5, 14, 14, 480],
'Mixed_4b': [5, 14, 14, 512],
'Mixed_4c': [5, 14, 14, 512],
'Mixed_4d': [5, 14, 14, 512],
'Mixed_4e': [5, 14, 14, 528],
'Mixed_4f': [5, 14, 14, 832],
'MaxPool_5a_2x2': [5, 7, 7, 832],
'Mixed_5b': [5, 7, 7, 832],
'Mixed_5c': [5, 7, 7, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(5607184, total_params)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
inputs = tf.random_uniform((batch_size, height, width, 3))
mixed_5c, _ = inception.inception_v1_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 1
height, width = 250, 300
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v1(inputs, num_classes,
global_pool=True)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024])
  def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
self.assertEquals(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 224, 224
num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v1(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v1(eval_inputs, num_classes, reuse=True)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
self.assertEquals(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 224, 224, 3])
logits, _ = inception.inception_v1(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
def testNoBatchNormScaleByDefault(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(inception.inception_v1_arg_scope()):
inception.inception_v1(inputs, num_classes, is_training=False)
self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), [])
def testBatchNormScale(self):
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (1, height, width, 3))
with slim.arg_scope(
inception.inception_v1_arg_scope(batch_norm_scale=True)):
inception.inception_v1(inputs, num_classes, is_training=False)
gamma_names = set(
v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
self.assertGreater(len(gamma_names), 0)
for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/SpeechRecognition/QuartzNet/scripts | scripts | evaluation | #!/bin/bash
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
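# `set -a` exports every variable assigned from here on, so the delegated
# inference script inherits them; PREDICTION_FILE defaults to empty unless the
# caller overrides it.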
set -a
: ${PREDICTION_FILE:=}
bash ./scripts/inference.sh "$@"
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | cudaUtils | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "cudaUtils.h"
#include "cuda_fp16.h"
#include "cuda_runtime.h"
#include <iostream>
#include <stdexcept>
#include <string>
namespace tts
{
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
void CudaUtils::sync(cudaStream_t stream)
{
check(cudaStreamSynchronize(stream), "CudaUtils::sync(stream)");
}
void CudaUtils::printDeviceInformation()
{
int device;
cudaError_t err = cudaGetDevice(&device);
if (err != cudaSuccess)
{
throw std::runtime_error("Failed to get active device: " + std::to_string(err));
}
std::cout << "Available devices:" << std::endl;
int nDevices;
err = cudaGetDeviceCount(&nDevices);
if (err != cudaSuccess)
{
throw std::runtime_error("Failed to get device count: " + std::to_string(err));
}
for (int i = 0; i < nDevices; ++i)
{
cudaDeviceProp prop;
err = cudaGetDeviceProperties(&prop, i);
if (err != cudaSuccess)
{
throw std::runtime_error(
"Failed to get device properties for device " + std::to_string(i) + " : " + std::to_string(err));
}
std::cout << "Device: " << i << " : '" << prop.name << "'";
std::cout << ", ";
std::cout << prop.multiProcessorCount << " SMs";
if (prop.cooperativeLaunch)
{
std::cout << ", ";
std::cout << "support Co-op Launch";
}
if (i == device)
{
std::cout << " <- [ ACTIVE ]";
}
std::cout << std::endl;
}
}
int CudaUtils::getNumSM()
{
int device;
cudaError_t err = cudaGetDevice(&device);
if (err != cudaSuccess)
{
throw std::runtime_error("Failed to get active device: " + std::to_string(err));
}
cudaDeviceProp prop;
err = cudaGetDeviceProperties(&prop, device);
if (err != cudaSuccess)
{
throw std::runtime_error("Failed to device properties: " + std::to_string(err));
}
return prop.multiProcessorCount;
}
} // namespace tts
|
PyTorch/Detection/Efficientdet/utils | utils | distributed_sampler | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
class OrderedDistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
    process can pass this sampler as a DataLoader sampler and load a subset of
    the original dataset that is exclusive to it. Unlike the default
    DistributedSampler, indices are not shuffled, so iteration order is
    deterministic.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
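    # Worked example (editor's addition, illustrative sizes): with
    # len(dataset) == 10 and num_replicas == 4, num_samples == 3 and
    # total_size == 12, so the index list [0..9] is padded to
    # [0, 1, ..., 9, 0, 1] and rank 0 iterates [0, 4, 8], rank 1 [1, 5, 9],
    # rank 2 [2, 6, 0], rank 3 [3, 7, 1].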
def __len__(self):
        return self.num_samples
|
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data | data | collate_batch | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from maskrcnn_benchmark.structures.image_list import to_image_list
class BatchCollator(object):
"""
From a list of samples from the dataset,
returns the batched images and targets.
This should be passed to the DataLoader
"""
def __init__(self, size_divisible=0):
self.size_divisible = size_divisible
def __call__(self, batch):
transposed_batch = list(zip(*batch))
images = to_image_list(transposed_batch[0], self.size_divisible)
targets = transposed_batch[1]
img_ids = transposed_batch[2]
return images, targets, img_ids
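# Usage sketch (editor's addition, not part of the original file): BatchCollator
# is passed to the DataLoader as `collate_fn`; the toy tensors below are
# illustrative assumptions only.
if __name__ == "__main__":
    import torch
    collator = BatchCollator(size_divisible=32)
    # A batch as the DataLoader would deliver it: (image, target, img_id) tuples.
    fake_batch = [
        (torch.rand(3, 480, 640), None, 0),
        (torch.rand(3, 512, 600), None, 1),
    ]
    images, targets, img_ids = collator(fake_batch)
    # Both images are padded into one ImageList whose sides are divisible by 32.
    print(images.tensors.shape, img_ids)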
|
PaddlePaddle/LanguageModeling/BERT/scripts | scripts | run_pretraining_p1 | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
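# Launches phase-1 BERT-Large pretraining (max sequence length 128) on 8 GPUs
# with the LAMB optimizer, AMP, fused multi-head attention, and gradient
# accumulation over 32 steps (global batch 256 x 32 x 8 = 65536 sequences,
# assuming --batch-size is per GPU), resuming from the latest phase-1
# checkpoint when one exists.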
python3 -m paddle.distributed.launch \
--gpus="0,1,2,3,4,5,6,7" \
./run_pretraining.py \
--input-dir=pretrain/phase1/unbinned/parquet \
--vocab-file=vocab/bert-large-uncased-vocab.txt \
--output-dir=./results/checkpoints \
--bert-model=bert-large-uncased \
--from-checkpoint=./results/checkpoints/bert-large-uncased/phase1 \
--last-step-of-checkpoint=auto \
--batch-size=256 \
--max-steps=7038 \
--num-steps-per-checkpoint=200 \
--log-freq=1 \
--max-seq-length=128 \
--max-predictions-per-seq=20 \
--gradient-merge-steps=32 \
--amp \
--use-dynamic-loss-scaling \
--optimizer=Lamb \
--fuse-mha \
--phase1 \
--scale-loss=1048576 \
--learning-rate=6e-3 \
--warmup-proportion=0.2843 \
--report-file=./results/dllogger_p1.json
|
PyTorch/Recommendation/DLRM/dlrm/cuda_src/sparse_gather | sparse_gather | sparse_pytorch_ops | #include <torch/extension.h>
torch::Tensor gather_gpu_fwd(torch::Tensor input, torch::Tensor weight);
void gather_gpu_bwd_fuse_sgd(const torch::Tensor grad, const torch::Tensor indices, float lr, torch::Tensor weight);
torch::Tensor gather_gpu_bwd(const torch::Tensor grad, const torch::Tensor indices, const int num_features);
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("gather_gpu_fwd", &gather_gpu_fwd, "Embedding gather", py::arg("indices"), py::arg("weight"));
m.def("gather_gpu_bwd_fuse_sgd", &gather_gpu_bwd_fuse_sgd, "Embedding gather backward with fused plain SGD",
py::arg("grad"), py::arg("indices"), py::arg("lr"), py::arg("weight"));
m.def("gather_gpu_bwd", &gather_gpu_bwd, "Embedding gather backward",
py::arg("grad"), py::arg("indices"), py::arg("num_features"));
}
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo | speech_ai_demo | speech_ai_demo | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import sounddevice as sd
print(sd.query_devices())
# In[ ]:
sd.default.device = 11
# In[ ]:
import sys
import os
import time
import numpy as np
import collections
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
from matplotlib import cm as cm
from IPython.display import Audio, display, clear_output, Markdown, Image
import librosa
import librosa.display
import ipywidgets as widgets
#
# import tacotron2 preprocessing utilities
from utils.tacotron2.symbols import symbols
from utils.tacotron2 import text_to_sequence as text_to_sequence_internal
# import bert pre- and postprocessing utilities
from utils.bert.preprocessing import convert_example_to_feature, read_squad_example, get_predictions
from utils.bert.tokenization import BertTokenizer
# import jasper pre- and postprocessing utilities
from utils.jasper.speech_utils import AudioSegment, SpeechClient
# import trtis api
from tensorrtserver.api import *
defaults = {
# settings
'sigma_infer': 0.6, # don't touch this
'sampling_rate': 22050, # don't touch this
'stft_hop_length': 256, # don't touch this
'url': 'localhost:8000', # don't touch this
'protocol': 0, # 0: http, 1: grpc
'autoplay': True, # autoplay
'character_limit_min': 4, # don't touch this
'character_limit_max': 124, # don't touch this
'vocab_file': "./utils/bert/vocab.txt", # don't touch this
'do_lower_case': True, # don't touch this
'version_2_with_negative': False, # if true, the model may give 'i don't know' as an answer. the model has to be trained for it.
'max_seq_length': 384, # the maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded.
'doc_stride': 128, # when splitting up a long document into chunks, how much stride to take between chunks
'max_query_length': 64, # the maximum number of tokens in the question. Questions longer than this will be truncated to this length
'n_best_size': 10, # don't touch this
'max_answer_length': 30, # don't touch this
'null_score_diff_threshold': 0.0, # don't touch this
'jasper_batch_size': 1, # don't touch this
'jasper_sampling_rate': 44100, # don't touch this
'record_maximum_seconds': 4.0 # maximum number of seconds to record
}
# create args object
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
args = Struct(**defaults)
# create the inference context for the models
infer_ctx_bert = InferContext(args.url, args.protocol, 'bertQA-ts-script', -1)
infer_ctx_tacotron2 = InferContext(args.url, args.protocol, 'tacotron2', -1)
infer_ctx_waveglow = InferContext(args.url, args.protocol, 'waveglow-trt', -1)
infer_jasper = SpeechClient(args.url, args.protocol, 'jasper-trt-ensemble', -1,
args.jasper_batch_size, 'pyt', verbose=False,
mode='asynchronous', from_features=False)
def display_sequences(sequences, labels, colors):
''' displays sequences on a dotted plot '''
plt.figure(figsize=(10, 2.5))
plt.tick_params(
axis='both',
which='both',
bottom=False,
top=False,
left=False,
right=False,
labelbottom=False,
labelleft=False)
for sequence,color,label in zip(sequences,colors,labels):
plt.plot(sequence, color, label=label)
plt.legend(loc='upper right')
plt.show()
def display_heatmap(sequence, title='preprocessed text'):
''' displays sequence as a heatmap '''
clear_output(wait=True)
sequence = sequence[None, :]
plt.figure(figsize=(10, 2.5))
plt.title(title)
plt.tick_params(
axis='both',
which='both',
bottom=False,
top=False,
left=False,
right=False,
labelbottom=False,
labelleft=False)
plt.imshow(sequence, cmap='BrBG_r', interpolation='nearest')
plt.show()
def display_sound(signal, title, color):
''' displays signal '''
clear_output(wait=True)
plt.figure(figsize=(10, 2.5))
plt.title(title)
plt.tick_params(
axis='both',
which='both',
bottom=True,
top=False,
left=False,
right=False,
labelbottom=True,
labelleft=False)
librosa.display.waveplot(signal, color=color)
plt.show()
def display_spectrogram(mel, title):
''' displays mel spectrogram '''
clear_output(wait=True)
fig = plt.figure(figsize=(10, 2.5))
ax = fig.add_subplot(111)
# plt.title(title)
plt.tick_params(
axis='both',
which='both',
bottom=True,
top=False,
left=False,
right=False,
labelbottom=True,
labelleft=False)
plt.xlabel('Time')
cmap = cm.get_cmap('jet', 30)
cax = ax.imshow(mel.astype(np.float32), interpolation="nearest", cmap=cmap)
ax.grid(True)
plt.show()
def text_to_sequence(text):
''' preprocessor of tacotron2
::text:: the input str
::returns:: sequence, the preprocessed text
'''
sequence = text_to_sequence_internal(text, ['english_cleaners'])
sequence = np.array(sequence, dtype=np.int64)
return sequence
def sequence_to_mel(sequence):
''' calls tacotron2
::sequence:: int64 numpy array, contains the preprocessed text
::returns:: (mel, mel_lengths) pair
mel is the mel-spectrogram, np.array
mel_lengths contains the length of the unpadded mel, np.array
'''
input_lengths = [len(sequence)]
input_lengths = np.array(input_lengths, dtype=np.int64)
# prepare input/output
input_dict = {}
input_dict['sequence__0'] = (sequence,)
input_dict['input_lengths__1'] = (input_lengths,)
output_dict = {}
output_dict['mel_outputs_postnet__0'] = InferContext.ResultFormat.RAW
output_dict['mel_lengths__1'] = InferContext.ResultFormat.RAW
batch_size = 1
# call tacotron2
result = infer_ctx_tacotron2.run(input_dict, output_dict, batch_size)
# get results
mel = result['mel_outputs_postnet__0'][0] # take only the first instance in the output batch
mel_lengths = result['mel_lengths__1'][0] # take only the first instance in the output batch
return mel, mel_lengths
def mel_to_signal(mel, mel_lengths):
''' calls waveglow
::mel:: mel spectrogram
::mel_lengths:: original length of mel spectrogram
::returns:: waveform
'''
# prepare input/output
mel = np.expand_dims(mel, axis=0)
input_dict = {}
input_dict['mel'] = (mel,)
stride = 256
n_group = 8
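    # WaveGlow upsamples each mel frame by `stride` audio samples and folds the
    # waveform into `n_group` channels, so the latent noise tensor z needs
    # mel_frames * stride / n_group samples per group.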
z_size = mel.shape[2]*stride//n_group
shape = (1,n_group,z_size)
input_dict['z'] = np.random.normal(0.0, 1.0, shape).astype(mel.dtype)
input_dict['z'] = (input_dict['z'],)
output_dict = {}
output_dict['audio'] = InferContext.ResultFormat.RAW
# call waveglow
result = infer_ctx_waveglow.run(input_dict, output_dict)
# get the results
signal = result['audio'][0] # take only the first instance in the output batch
# postprocessing of waveglow: trimming signal to its actual size
trimmed_length = mel_lengths[0] * args.stft_hop_length
signal = signal[:trimmed_length] # trim
signal = signal.astype(np.float32)
return signal
def question_and_context_to_feature(question_text, context):
tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512) # for bert large
example = read_squad_example(question_text,
context,
version_2_with_negative=args.version_2_with_negative)
feature = convert_example_to_feature(
example=example,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length)
return example, feature
def button_rec_clicked(change):
if record_seconds.value > 0.0:
with plot_jasper_audio:
clear_output(wait=True)
recording = sd.rec(int(record_seconds.value*args.jasper_sampling_rate), samplerate=args.jasper_sampling_rate, channels=1)
while record_seconds.value > 0:
time.sleep(0.01)
record_seconds.value -= 0.01
sd.wait()
recording = recording.squeeze()
display_sound(recording,'recorded audio','orange')
audio = AudioSegment(recording, args.jasper_sampling_rate).samples
hypotheses = infer_jasper.recognize([audio], ['audio recording'])
question_text.value = str(hypotheses[0]) + '? '
button_rec = widgets.Button(description="RECORD")
button_rec.on_click(button_rec_clicked)
record_seconds = widgets.FloatSlider(min=0.0, max=args.record_maximum_seconds, value=args.record_maximum_seconds,
step=0.1, continuous_update=True, description = "seconds")
buttons = widgets.HBox([button_rec, record_seconds])
question_text = widgets.Textarea(
value='jasper output / bert input question',
placeholder='',
description='',
disabled=False,
continuous_update=True,
layout=widgets.Layout(width='550px', height='40px')
)
context = widgets.Textarea(
value='bert input context',
placeholder='',
description='',
disabled=False,
continuous_update=True,
layout=widgets.Layout(width='550px', height='80px')
)
question_context = widgets.HBox([question_text, context])
response_text = widgets.Textarea(
value='',
placeholder='',
description='',
disabled=False,
continuous_update=True,
layout=widgets.Layout(width='550px', height='40px')
)
def text_to_logits(input_ids_data, segment_ids_data, input_mask_data):
# call bert
input_dict = {}
input_dict['input__0'] = (input_ids_data.astype(np.int64),)
input_dict['input__1'] = (segment_ids_data.astype(np.int64),)
input_dict['input__2'] = (input_mask_data.astype(np.int64),)
batch_size = 1
output_dict = {}
output_dict['output__0'] = InferContext.ResultFormat.RAW
output_dict['output__1'] = InferContext.ResultFormat.RAW
#
result = infer_ctx_bert.run(input_dict, output_dict, batch_size)
#
start_logits = [float(x) for x in result["output__0"][0].flat]
end_logits = [float(x) for x in result["output__1"][0].flat]
return start_logits, end_logits
def question_text_change(change):
text = change['new']
text = text.strip(' ')
length = len(text)
if length < args.character_limit_min: # too short text
return
if text[-1] != '?':
return
# preprocess bert
example, feature = question_and_context_to_feature(text, context.value)
input_ids_data = np.array(feature.input_ids, dtype=np.int64)
input_mask_data = np.array(feature.input_mask, dtype=np.int64)
segment_ids_data = np.array(feature.segment_ids, dtype=np.int64)
L = segment_ids_data.shape[0] - 1
while L > 20 and segment_ids_data[L-20] == 0:
L -= 20
with plot_tensor:
clear_output(wait=True)
C = input_ids_data.max()
sequences = (input_ids_data[:L],C//2*input_mask_data[:L],C*segment_ids_data[:L])
display_sequences(sequences, ('input','mask','segment'), ('r.','b.','g.'))
# call bert
start_logits, end_logits = text_to_logits(input_ids_data, segment_ids_data, input_mask_data)
with plot_logits:
clear_output(wait=True)
start = np.array(start_logits, dtype=np.float32)
end = np.array(end_logits, dtype=np.float32)
sequences = (start[:L], end[:L])
display_sequences(sequences, ('start_logits', 'end_logits'), ('black', 'violet'))
# postprocess bert
prediction = get_predictions(example, feature, start_logits, end_logits,
args.n_best_size, args.max_answer_length, args.do_lower_case,
args.version_2_with_negative, args.null_score_diff_threshold)
response_text.value = prediction[0]["text"] + '. \n'
def context_change(change):
text = change['new']
length = len(text)
if length < args.character_limit_min: # too short text
return
# inference
question_text.value += ' '
def response_text_change(change):
    ''' this gets called each time response_text.value changes '''
text = change['new']
text = text.strip(' ')
length = len(text)
if length < args.character_limit_min: # too short text
return
if length > args.character_limit_max: # too long text
        response_text.value = text[:args.character_limit_max]
return
# preprocess tacotron2
sequence = text_to_sequence(text)
with plot_response_text_preprocessed:
display_heatmap(sequence)
# run tacotron2
mel, mel_lengths = sequence_to_mel(sequence)
with plot_spectrogram:
display_spectrogram(mel, change['new'])
# run waveglow
signal = mel_to_signal(mel, mel_lengths)
with plot_signal:
display_sound(signal, change['new'], 'green')
with plot_play:
clear_output(wait=True)
display(Audio(signal, rate=args.sampling_rate, autoplay=args.autoplay))
def get_output_widget(width, height, object_fit='fill'):
''' creates an output widget with default values and returns it '''
layout = widgets.Layout(width=width,
height=height,
object_fit=object_fit,
object_position = '{center} {center}')
ret = widgets.Output(layout=layout)
return ret
plot_tensor = get_output_widget(width='5in',height='1.75in')
plot_logits = get_output_widget(width='5in',height='1.75in')
plot_response_text_preprocessed = get_output_widget(width='10in',height='1in')
plot_spectrogram = get_output_widget(width='10in',height='2.0in', object_fit='scale-down')
plot_jasper_audio = get_output_widget(width='10in',height='2.0in')
plot_signal = get_output_widget(width='10in',height='2.0in')
plot_play = get_output_widget(width='4in',height='1in')
empty = widgets.VBox([], layout=widgets.Layout(height='1in'))
markdown_z0 = Markdown('**Jasper input**')
markdown_m0 = Markdown('**Jasper output / BERT input**')
markdown_bert = Markdown('**BERT**')
markdown_tacotron2 = Markdown('**Tacotron 2**')
markdown_3 = Markdown('**WaveGlow**')
bert_widgets = widgets.HBox([plot_tensor, plot_logits])
tacotron2_widgets = widgets.HBox([response_text, plot_spectrogram])
display(
empty,
markdown_z0,
buttons,
markdown_m0, question_context,
markdown_bert,
bert_widgets,
markdown_tacotron2,
tacotron2_widgets,
markdown_3,
plot_play,
empty
)
def fill_initial_values():
with plot_jasper_audio:
display_sound(np.zeros(100),"input audio",'orange')
#
context.value = "The man holding the telescope went into a shop to purchase some flowers on the occasion of all saints day. "
# context.value = "William Shakespeare was an English poet, playwright and actor, widely regarded as the greatest writer in the English language and the world's greatest dramatist. He is often called England's national poet and the \"Bard of Avon\"."
question_text.value = ""
fill_initial_values()
response_text.observe(response_text_change, names='value')
question_text.observe(question_text_change, names='value')
context.observe(context_change, names='value')
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/model | model | cuml_auto_arima | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: models.stat_models.CUMLAutoARIMA
defaults:
- _self_
- /trainer@_global_/trainer: stattrainer
|
PyTorch/LanguageModeling/BART/utils | utils | callbacks | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import os
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, ProgressBar
from pytorch_lightning.utilities import rank_zero_only
from utils.utils import save_json
from utils.distributed_utils import all_reduce_item, get_world_size
import time
def count_trainable_parameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
return params
logger = logging.getLogger(__name__)
class Seq2SeqLoggingCallback(pl.Callback):
@rank_zero_only
def on_batch_end(self, trainer, pl_module):
lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(lrs)
@rank_zero_only
def _write_logs(
self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
) -> None:
logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
metrics = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
# Log results
od = Path(pl_module.hparams.output_dir)
if type_path == "test":
results_file = od / "test_results.txt"
generations_file = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=True)
generations_file.parent.mkdir(exist_ok=True)
with open(results_file, "a+") as writer:
for key in sorted(metrics):
if key in ["log", "progress_bar", "preds"]:
continue
val = metrics[key]
if isinstance(val, torch.Tensor):
val = val.item()
msg = f"{key}: {val:.6f}\n"
writer.write(msg)
if not save_generations:
return
if "preds" in metrics:
content = "\n".join(metrics["preds"])
generations_file.open("w+").write(content)
def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
self.train_tob_list.append(outputs[0][0]["log"]["tpb"])
self.train_time_epoch_list.append(time.time() - self.t0) #Measures ~time for forward + backward + optimizer_step
def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
self.t0 = time.time()
def on_train_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
try:
npars = pl_module.model.model.num_parameters()
except AttributeError:
npars = pl_module.model.num_parameters()
n_trainable_pars = count_trainable_parameters(pl_module)
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
self.train_time_epoch_list = []
self.train_tob_list = []
self.tokens = 0
self.train_time = 0.0
self.avg_steps_per_sec = 0.0
self.epochs = 0
try:
self.sync_dist = pl_module.sync_dist
except:
self.sync_dist = get_world_size() > 1
def process_stats(self, train_times, outputs, filter_p=0.8):
index_list = np.argsort(train_times) #sort based on train_times
        best_n = int(len(outputs) * filter_p)
train_time = 0.0
unpadded_tokens = 0
for i in index_list[:best_n]:
train_time += train_times[i]
unpadded_tokens += outputs[i]
avg_steps_per_sec = train_time / best_n
return train_time, unpadded_tokens, best_n, avg_steps_per_sec
def on_train_epoch_end(self, trainer, pl_module, outputs):
try:
outputs = self.train_tob_list
train_time, unpadded_tokens, train_batches, avg_steps_per_sec = self.process_stats(self.train_time_epoch_list, outputs)
pl_module.log("train_throughput", unpadded_tokens/train_time, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=self.sync_dist)
all_reduce_tokens = all_reduce_item(unpadded_tokens, "sum")
all_reduce_time = all_reduce_item(train_time, "mean")
all_reduce_avg_steps_per_sec = all_reduce_item(avg_steps_per_sec, "mean")
#Accumulate
self.tokens = ((self.tokens * self.epochs) + all_reduce_tokens) / (self.epochs + 1)
self.train_time = ((self.train_time * self.epochs) + all_reduce_time) / (self.epochs + 1)
self.avg_steps_per_sec = ((self.avg_steps_per_sec * self.epochs) + all_reduce_avg_steps_per_sec) / (self.epochs + 1.0)
self.epochs +=1
#Reset
self.train_time_epoch_list = []
self.train_tob_list = []
except ZeroDivisionError:
print("Train time is reported as 0? It's possible training is already complete!")
pass
def on_train_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
if self.epochs < 1:
outputs = self.train_tob_list
train_time, unpadded_tokens, train_batches, avg_steps_per_sec = self.process_stats(self.train_time_epoch_list, outputs)
pl_module.log("train_throughput", unpadded_tokens/train_time, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=self.sync_dist)
all_reduce_tokens = all_reduce_item(unpadded_tokens, "sum")
all_reduce_time = all_reduce_item(train_time, "mean")
all_reduce_avg_steps_per_sec = all_reduce_item(avg_steps_per_sec, "mean")
#Accumulate
self.tokens = ((self.tokens * self.epochs) + all_reduce_tokens) / (self.epochs + 1)
self.train_time = ((self.train_time * self.epochs) + all_reduce_time) / (self.epochs + 1)
self.avg_steps_per_sec = ((self.avg_steps_per_sec * self.epochs) + all_reduce_avg_steps_per_sec) / (self.epochs + 1.0)
def get_checkpoint_callback(output_dir, metric, save_top_k=1):
"""Saves the best model by validation ROUGE2 score."""
monitor = f"val_{metric}"
if metric == "rouge2":
exp = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
exp = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "loss":
exp = "{loss:.4f}-{epoch}"
monitor = metric
else:
raise NotImplementedError(
f"seq2seq callbacks only support rouge2, bleu and loss, got {metric}, You can make your own by adding to this function."
)
checkpoint_callback = ModelCheckpoint(
filename=os.path.join(output_dir, exp),
monitor=monitor,
mode="min" if "loss" in metric else "max",
save_top_k=save_top_k,
period=1, # maybe save a checkpoint every time val is run, not just end of epoch.
)
return checkpoint_callback
class CheckpointEveryNSteps(pl.Callback):
"""
Save a checkpoint every N steps, instead of Lightning's default that checkpoints
based on validation loss.
"""
def __init__(
self,
output_dir,
save_step_frequency,
prefix="",
use_modelcheckpoint_filename=False,
):
"""
Args:
save_step_frequency: how often to save in steps
prefix: add a prefix to the name, only used if
use_modelcheckpoint_filename=False
use_modelcheckpoint_filename: just use the ModelCheckpoint callback's
default filename, don't use ours.
"""
self.output_dir = output_dir
self.save_step_frequency = save_step_frequency
self.prefix = prefix
self.use_modelcheckpoint_filename = use_modelcheckpoint_filename
def on_batch_end(self, trainer: pl.Trainer, _):
""" Check if we should save a checkpoint after every train batch """
epoch = trainer.current_epoch
global_step = trainer.global_step
if global_step % self.save_step_frequency == 0:
if self.use_modelcheckpoint_filename:
filename = trainer.checkpoint_callback.filename
else:
filename = f"{self.prefix}_epoch{epoch}_step{global_step}.ckpt"
ckpt_path = os.path.join(self.output_dir, filename)
trainer.save_checkpoint(ckpt_path)
def on_train_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
epoch = trainer.current_epoch
global_step = trainer.global_step
if self.use_modelcheckpoint_filename:
filename = trainer.checkpoint_callback.filename
else:
filename = f"{self.prefix}_epoch{epoch}_step{global_step}.ckpt"
ckpt_path = os.path.join(self.output_dir, filename)
trainer.save_checkpoint(ckpt_path)
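# Usage sketch (editor's addition; the output directory and step frequency are
# illustrative assumptions): the callback is simply appended to the Trainer's
# callback list, e.g.
#
#   trainer = pl.Trainer(
#       callbacks=[CheckpointEveryNSteps(output_dir="results",
#                                        save_step_frequency=1000)])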
def get_early_stopping_callback(metric, patience):
return EarlyStopping(
monitor=metric, # does this need avg?
mode="min" if "loss" in metric else "max",
patience=patience,
verbose=True,
    )
|
PyTorch/Classification/GPUNet/triton/125ms-D/runner | runner | config_NVIDIA-DGX-A100-(1x-A100-80GB) | batching: dynamic
checkpoints:
- name: 1.25ms-D
url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_d1_pyt_ckpt/versions/21.12.0_amp/zip
configurations:
- checkpoint: 1.25ms-D
parameters:
backend_accelerator: trt
checkpoint: 1.25ms-D
device_kind: gpu
export_format: onnx
export_precision: fp16
format: onnx
max_batch_size: 64
number_of_model_instances: 2
precision: fp16
tensorrt_capture_cuda_graph: 0
torch_jit: none
container_version: '21.12'
datasets:
- name: imagenet
datasets_dir: datasets
ensemble_model_name: null
framework: PyTorch
measurement_steps_offline: 8
measurement_steps_online: 32
model_name: GPUnet
performance_tool: model_analyzer
triton_container_image: nvcr.io/nvidia/tritonserver:21.12-py3
triton_custom_operations: null
triton_dockerfile: null
triton_load_model_method: explicit
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/deployment/export | export | ts-trace | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
config:
type: ts-trace
|
PyTorch/Translation/Transformer/fairseq/optim/lr_scheduler | lr_scheduler | fixed_schedule | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('fixed')
class FixedSchedule(FairseqLRScheduler):
"""Decay the LR on a fixed schedule."""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
# set defaults
args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0
self.lr = args.lr[0]
if args.warmup_updates > 0:
self.warmup_factor = 1. / args.warmup_updates
else:
self.warmup_factor = 1
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
help='force annealing at specified epoch')
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
def get_next_lr(self, epoch):
lrs = self.args.lr
if self.args.force_anneal is None or epoch < self.args.force_anneal:
# use fixed LR schedule
next_lr = lrs[min(epoch, len(lrs) - 1)]
else:
            # anneal based on lr_shrink
next_lr = lrs[-1] * self.args.lr_shrink ** (epoch + 1 - self.args.force_anneal)
return next_lr
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr(self.warmup_factor * self.lr)
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if self.args.warmup_updates > 0 and num_updates <= self.args.warmup_updates:
self.warmup_factor = num_updates / float(self.args.warmup_updates)
self.optimizer.set_lr(self.warmup_factor * self.lr)
return self.optimizer.get_lr()
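# Worked example (editor's addition, illustrative values): with `--lr 0.25
# --warmup-updates 4000`, step_update() scales the rate linearly during warmup:
#   update 1000 -> warmup_factor = 1000 / 4000 = 0.25 -> effective lr = 0.0625
#   update 4000 -> warmup_factor = 1.0                -> effective lr = 0.25
# After warmup the rate stays at lrs[min(epoch, len(lrs) - 1)] until
# --force-anneal is reached, then shrinks by lr_shrink every epoch.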
|
PyTorch/Detection/SSD/examples | examples | SSD300_FP16_EVAL | # This script evaluates SSD300 model in FP16 using 32 batch size on 1 GPU
# Usage: ./SSD300_FP16_EVAL.sh <path to this repository> <path to dataset> <path to checkpoint> <additional flags>
python $1/main.py --backbone resnet50 --ebs 32 --data $2 --mode evaluation --checkpoint $3 ${@:4}
|
TensorFlow/Detection/SSD/models/research/slim/nets | nets | nets_factory | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
from nets import alexnet
from nets import cifarnet
from nets import i3d
from nets import inception
from nets import lenet
from nets import mobilenet_v1
from nets import overfeat
from nets import resnet_v1
from nets import resnet_v2
from nets import s3dg
from nets import vgg
from nets.mobilenet import mobilenet_v2
from nets.nasnet import nasnet
from nets.nasnet import pnasnet
slim = tf.contrib.slim
networks_map = {'alexnet_v2': alexnet.alexnet_v2,
'cifarnet': cifarnet.cifarnet,
'overfeat': overfeat.overfeat,
'vgg_a': vgg.vgg_a,
'vgg_16': vgg.vgg_16,
'vgg_19': vgg.vgg_19,
'inception_v1': inception.inception_v1,
'inception_v2': inception.inception_v2,
'inception_v3': inception.inception_v3,
'inception_v4': inception.inception_v4,
'inception_resnet_v2': inception.inception_resnet_v2,
'i3d': i3d.i3d,
's3dg': s3dg.s3dg,
'lenet': lenet.lenet,
'resnet_v1_50': resnet_v1.resnet_v1_50,
'resnet_v1_101': resnet_v1.resnet_v1_101,
'resnet_v1_152': resnet_v1.resnet_v1_152,
'resnet_v1_200': resnet_v1.resnet_v1_200,
'resnet_v2_50': resnet_v2.resnet_v2_50,
'resnet_v2_101': resnet_v2.resnet_v2_101,
'resnet_v2_152': resnet_v2.resnet_v2_152,
'resnet_v2_200': resnet_v2.resnet_v2_200,
'mobilenet_v1': mobilenet_v1.mobilenet_v1,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025,
'mobilenet_v2': mobilenet_v2.mobilenet,
'mobilenet_v2_140': mobilenet_v2.mobilenet_v2_140,
'mobilenet_v2_035': mobilenet_v2.mobilenet_v2_035,
'nasnet_cifar': nasnet.build_nasnet_cifar,
'nasnet_mobile': nasnet.build_nasnet_mobile,
'nasnet_large': nasnet.build_nasnet_large,
'pnasnet_large': pnasnet.build_pnasnet_large,
'pnasnet_mobile': pnasnet.build_pnasnet_mobile,
}
arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope,
'cifarnet': cifarnet.cifarnet_arg_scope,
'overfeat': overfeat.overfeat_arg_scope,
'vgg_a': vgg.vgg_arg_scope,
'vgg_16': vgg.vgg_arg_scope,
'vgg_19': vgg.vgg_arg_scope,
'inception_v1': inception.inception_v3_arg_scope,
'inception_v2': inception.inception_v3_arg_scope,
'inception_v3': inception.inception_v3_arg_scope,
'inception_v4': inception.inception_v4_arg_scope,
'inception_resnet_v2':
inception.inception_resnet_v2_arg_scope,
'i3d': i3d.i3d_arg_scope,
's3dg': s3dg.s3dg_arg_scope,
'lenet': lenet.lenet_arg_scope,
'resnet_v1_50': resnet_v1.resnet_arg_scope,
'resnet_v1_101': resnet_v1.resnet_arg_scope,
'resnet_v1_152': resnet_v1.resnet_arg_scope,
'resnet_v1_200': resnet_v1.resnet_arg_scope,
'resnet_v2_50': resnet_v2.resnet_arg_scope,
'resnet_v2_101': resnet_v2.resnet_arg_scope,
'resnet_v2_152': resnet_v2.resnet_arg_scope,
'resnet_v2_200': resnet_v2.resnet_arg_scope,
'mobilenet_v1': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_arg_scope,
'mobilenet_v2': mobilenet_v2.training_scope,
'mobilenet_v2_035': mobilenet_v2.training_scope,
'mobilenet_v2_140': mobilenet_v2.training_scope,
'nasnet_cifar': nasnet.nasnet_cifar_arg_scope,
'nasnet_mobile': nasnet.nasnet_mobile_arg_scope,
'nasnet_large': nasnet.nasnet_large_arg_scope,
'pnasnet_large': pnasnet.pnasnet_large_arg_scope,
'pnasnet_mobile': pnasnet.pnasnet_mobile_arg_scope,
}
def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False):
"""Returns a network_fn such as `logits, end_points = network_fn(images)`.
Args:
name: The name of the network.
num_classes: The number of classes to use for classification. If 0 or None,
the logits layer is omitted and its input features are returned instead.
weight_decay: The l2 coefficient for the model weights.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
network_fn: A function that applies the model to a batch of images. It has
the following signature:
net, end_points = network_fn(images)
The `images` input is a tensor of shape [batch_size, height, width, 3]
with height = width = network_fn.default_image_size. (The permissibility
and treatment of other sizes depends on the network_fn.)
The returned `end_points` are a dictionary of intermediate activations.
The returned `net` is the topmost layer, depending on `num_classes`:
If `num_classes` was a non-zero integer, `net` is a logits tensor
of shape [batch_size, num_classes].
If `num_classes` was 0 or `None`, `net` is a tensor with the input
to the logits layer of shape [batch_size, 1, 1, num_features] or
[batch_size, num_features]. Dropout has not been applied to this
(even if the network's original classification does); it remains for
the caller to do this or not.
Raises:
ValueError: If network `name` is not recognized.
"""
if name not in networks_map:
    raise ValueError('Unknown network name: %s' % name)
func = networks_map[name]
@functools.wraps(func)
def network_fn(images, **kwargs):
arg_scope = arg_scopes_map[name](weight_decay=weight_decay)
with slim.arg_scope(arg_scope):
return func(images, num_classes=num_classes, is_training=is_training,
**kwargs)
if hasattr(func, 'default_image_size'):
network_fn.default_image_size = func.default_image_size
return network_fn
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner/maintainer/docker | docker | container | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
import docker
from docker.models.containers import ExecResult
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..container import Container
class DockerContainer(Container):
def __init__(self, name: str):
super().__init__(name)
self._container = None
self._docker_client = docker.from_env()
self._docker_api_client = docker.APIClient()
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> ExecResult:
"""
Run command inside container
Args:
command: command to execute
Returns:
ExecResult
"""
pass
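# Illustrative sketch only: a minimal concrete subclass built on the docker-py
# client objects created above. The image and command are arbitrary examples;
# the real runner containers in this toolkit implement richer start/stop logic.
class SleepingContainer(DockerContainer):
    def start(self):
        # Run a detached container that stays alive so commands can be exec'd in it.
        self._container = self._docker_client.containers.run(
            "ubuntu:20.04", command="sleep infinity", detach=True
        )

    def stop(self):
        if self._container is not None:
            self._container.stop()
            self._container.remove()
            self._container = None

    def run(self, command: str) -> ExecResult:
        # exec_run returns an ExecResult with exit_code and output fields.
        return self._container.exec_run(command)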
|
PyTorch/Classification/ConvNets/se-resnext101-32x4d | se-resnext101-32x4d | README | # SE-ResNeXt101-32x4d For PyTorch
This repository provides a script and recipe to train the SE-ResNeXt101-32x4d model to
achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA.
## Table Of Contents
* [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Optimizer](#optimizer)
* [Data augmentation](#data-augmentation)
* [DALI](#dali)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Setup](#setup)
* [Requirements](#requirements)
* [Quick Start Guide](#quick-start-guide)
* [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Command-line options](#command-line-options)
* [Dataset guidelines](#dataset-guidelines)
* [Training process](#training-process)
* [Inference process](#inference-process)
* [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
* [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
* [Example plots](#example-plots)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Training performance: NVIDIA DGX-1 16GB (8x V100 16GB)](#training-performance-nvidia-dgx-1-16gb-8x-v100-16gb)
* [Training performance: NVIDIA DGX-1 32GB (8x V100 32GB)](#training-performance-nvidia-dgx-1-32gb-8x-v100-32gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX-1 16GB (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
* [Inference performance: NVIDIA T4](#inference-performance-nvidia-t4)
* [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
The SE-ResNeXt101-32x4d is a [ResNeXt101-32x4d](https://arxiv.org/pdf/1611.05431.pdf)
model with added Squeeze-and-Excitation module introduced
in [Squeeze-and-Excitation Networks](https://arxiv.org/pdf/1709.01507.pdf) paper.
The Squeeze-and-Excitation module architecture for ResNet-type models is shown in the [Model architecture](#model-architecture) section below.
This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 3x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
We use [NHWC data layout](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) when training using Mixed Precision.
### Model architecture
![SEArch](./img/SEArch.png)
_Image source: [Squeeze-and-Excitation Networks](https://arxiv.org/pdf/1709.01507.pdf)_
The image shows the architecture of the SE block and where it is placed in the ResNet bottleneck block.
### Default configuration
The following sections highlight the default configurations for the SE-ResNeXt101-32x4d model.
#### Optimizer
This model uses SGD with momentum optimizer with the following hyperparameters:
* Momentum (0.875)
* Learning rate (LR) = 0.256 for 256 batch size, for other batch sizes we linearly
scale the learning rate.
* Learning rate schedule - we use a cosine LR schedule (a short sketch follows this list)
* For bigger batch sizes (512 and up) we use linear warmup of the learning rate
during the first couple of epochs
according to [Training ImageNet in 1 hour](https://arxiv.org/abs/1706.02677).
Warmup length depends on the total training length.
* Weight decay (WD) = 6.103515625e-05 (1/16384).
* We do not apply WD on Batch Norm trainable parameters (gamma/bias)
* Label smoothing = 0.1
* We train for:
* 90 Epochs -> 90 epochs is a standard for ImageNet networks
* 250 Epochs -> best possible accuracy.
* For 250 epoch training we also use [MixUp regularization](https://arxiv.org/pdf/1710.09412.pdf).
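A minimal sketch of the schedule described above (linear warmup followed by cosine decay) is shown below. It is an illustration only, not the exact implementation used by the training scripts; the base LR of 0.256 is the default listed above, and the warmup length is a free parameter:
```python
import math

def lr_at_step(step, total_steps, base_lr=0.256, warmup_steps=0, end_lr=0.0):
    """Linear warmup followed by cosine decay (illustrative sketch only)."""
    if warmup_steps and step < warmup_steps:
        return base_lr * (step + 1) / warmup_steps
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    return end_lr + 0.5 * (base_lr - end_lr) * (1.0 + math.cos(math.pi * progress))
```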
#### Data augmentation
This model uses the following data augmentation (a torchvision sketch follows the list):
* For training:
* Normalization
* Random resized crop to 224x224
* Scale from 8% to 100%
* Aspect ratio from 3/4 to 4/3
* Random horizontal flip
* For inference:
* Normalization
* Scale to 256x256
* Center crop to 224x224
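The pipeline above corresponds roughly to the following torchvision sketch. This is an illustration only; the repository's data backends implement it with DALI or PyTorch loaders, and the ImageNet mean/std values below are the commonly used defaults, assumed here rather than taken from this repository:
```python
import torchvision.transforms as T

normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

train_transform = T.Compose([
    T.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3)),
    T.RandomHorizontalFlip(),
    T.ToTensor(),
    normalize,
])

val_transform = T.Compose([
    T.Resize(256),
    T.CenterCrop(224),
    T.ToTensor(),
    normalize,
])
```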
### Feature support matrix
The following features are supported by this model:
| Feature | SE-ResNeXt101-32x4d |
|-----------------------|--------------------------|
|[DALI](https://docs.nvidia.com/deeplearning/sdk/dali-release-notes/index.html) | Yes |
|[APEX AMP](https://nvidia.github.io/apex/amp.html) | Yes |
#### Features
- NVIDIA DALI - DALI is a library that accelerates the data preparation pipeline. To accelerate your input pipeline, you only need to define your data loader
with the DALI library. For more information about DALI, refer to the [DALI product documentation](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/index.html).
- [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains utility libraries, such as [Automatic Mixed Precision (AMP)](https://nvidia.github.io/apex/amp.html), which require minimal network code changes to leverage Tensor Cores performance. Refer to the [Enabling mixed precision](#enabling-mixed-precision) section for more details.
### DALI
We use [NVIDIA DALI](https://github.com/NVIDIA/DALI),
which speeds up data loading when CPU becomes a bottleneck.
DALI can use CPU or GPU, and outperforms the PyTorch native dataloader.
Run training with `--data-backends dali-gpu` or `--data-backends dali-cpu` to enable DALI.
For DGXA100 and DGX1 we recommend `--data-backends dali-cpu`.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in CUDA 8 in the NVIDIA Deep Learning SDK.
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
Mixed precision is enabled in PyTorch by using the Automatic Mixed Precision (AMP), a library from [APEX](https://github.com/NVIDIA/apex) that casts variables to half-precision upon retrieval,
while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients.
In PyTorch, loss scaling can be easily applied by using the scale_loss() method provided by AMP. The scaling value to be used can be [dynamic](https://nvidia.github.io/apex/fp16_utils.html#apex.fp16_utils.DynamicLossScaler) or fixed.
For an in-depth walk through on AMP, check out sample usage [here](https://github.com/NVIDIA/apex/tree/master/apex/amp#usage-and-getting-started). [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains utility libraries, such as AMP, which require minimal network code changes to leverage tensor cores performance.
To enable mixed precision, you can:
- Import AMP from APEX:
```python
from apex import amp
```
- Wrap model and optimizer in amp.initialize:
```python
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", loss_scale="dynamic")
```
- Scale loss before backpropagation:
```python
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
```
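Put together, a minimal APEX AMP training step could look like the sketch below. The `model`, `optimizer`, `criterion`, and `loader` objects are assumed to exist; this is not the exact loop used by `main.py`:
```python
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", loss_scale="dynamic")

for images, targets in loader:
    optimizer.zero_grad()
    loss = criterion(model(images), targets)
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()  # gradients are computed on the scaled loss
    optimizer.step()
```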
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
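If you want to make the setting explicit (or disable it for debugging), PyTorch exposes TF32 through backend flags, as in this short sketch:
```python
import torch

# TF32 is already enabled by default on Ampere GPUs; these flags make it explicit.
torch.backends.cuda.matmul.allow_tf32 = True  # matrix multiplications
torch.backends.cudnn.allow_tf32 = True        # cuDNN convolutions
```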
## Setup
The following section lists the requirements that you need to meet in order to start training the SE-ResNeXt101-32x4d model.
### Requirements
This repository contains a Dockerfile that extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PyTorch 21.03-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch) or newer
* Supported GPUs:
* [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
* [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/)
* [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the
following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning
DGX Documentation:
* [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
* [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry)
* [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running)
For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
### 1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/PyTorch/Classification/
```
### 2. Download and preprocess the dataset.
The SE-ResNeXt101-32x4d script operates on ImageNet 1k, a widely popular image classification dataset from the ILSVRC challenge.
PyTorch can work directly on JPEGs; therefore, preprocessing/augmentation is not needed.
To train your model using mixed or TF32 precision with Tensor Cores or using FP32,
perform the following steps using the default parameters of the se-resnext101-32x4d model on the ImageNet dataset.
For the specifics concerning training and inference, see the [Advanced](#advanced) section.
1. [Download the images](http://image-net.org/download-images).
2. Extract the training data:
```bash
mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
tar -xvf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar
find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done
cd ..
```
3. Extract the validation data and move the images to subfolders:
```bash
mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar
wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash
```
The directory in which the `train/` and `val/` directories are placed is referred to as `<path to imagenet>` in this document.
### 3. Build the SE-ResNeXt101-32x4d PyTorch NGC container.
```
docker build . -t nvidia_se-resnext101-32x4d
```
### 4. Start an interactive session in the NGC container to run training/inference.
```
nvidia-docker run --rm -it -v <path to imagenet>:/imagenet --ipc=host nvidia_se-resnext101-32x4d
```
### 5. Start training
To run training for a standard configuration (DGXA100/DGX1V, AMP/TF32/FP32, 90/250 Epochs),
run one of the scripts in the `./se-resnext101-32x4d/training` directory
called `./se-resnext101-32x4d/training/{AMP, TF32, FP32}/{ DGXA100, DGX1V }_se-resnext101-32x4d_{AMP, TF32, FP32}_{ 90, 250 }E.sh`.
Ensure ImageNet is mounted in the `/imagenet` directory.
Example:
`bash ./se-resnext101-32x4d/training/AMP/DGX1_se-resnext101-32x4d_AMP_250E.sh <path where to store checkpoints and logs>`
### 6. Start inference
You can download pretrained weights from NGC:
```bash
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/seresnext101_32x4d_pyt_amp/versions/20.06.0/zip -O seresnext101_32x4d_pyt_amp_20.06.0.zip
unzip seresnext101_32x4d_pyt_amp_20.06.0.zip
```
To run inference on ImageNet, run:
`python ./main.py --arch se-resnext101-32x4d --evaluate --epochs 1 --pretrained-from-file nvidia_se-resnext101-32x4d_200821.pth.tar -b <batch size> <path to imagenet>`
To run inference on JPEG image using pretrained weights:
`python classify.py --arch se-resnext101-32x4d --pretrained-from-file nvidia_se-resnext101-32x4d_200821.pth.tar --precision AMP|FP32 --image <path to JPEG image>`
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
To run a non-standard configuration, use:
* For 1 GPU
* FP32
`python ./main.py --arch se-resnext101-32x4d -c fanin --label-smoothing 0.1 <path to imagenet>`
* AMP
`python ./main.py --arch se-resnext101-32x4d -c fanin --label-smoothing 0.1 --amp --static-loss-scale 256 <path to imagenet>`
* For multiple GPUs
* FP32
`python ./multiproc.py --nproc_per_node 8 ./main.py --arch se-resnext101-32x4d -c fanin --label-smoothing 0.1 <path to imagenet>`
* AMP
`python ./multiproc.py --nproc_per_node 8 ./main.py --arch se-resnext101-32x4d -c fanin --label-smoothing 0.1 --amp --static-loss-scale 256 <path to imagenet>`
Use `python ./main.py -h` to obtain the list of available options in the `main.py` script.
### Command-line options
To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example:
`python main.py -h`
```
usage: main.py [-h] [--data-backend BACKEND] [--arch ARCH]
[--model-config CONF] [-j N] [--epochs N]
[--run-epochs N] [-b N] [--optimizer-batch-size N] [--lr LR]
[--lr-schedule SCHEDULE] [--warmup E] [--label-smoothing S]
[--mixup ALPHA] [--momentum M] [--weight-decay W]
[--bn-weight-decay] [--nesterov] [--print-freq N]
[--resume PATH] [--pretrained-from-file PATH]
[--static-loss-scale STATIC_LOSS_SCALE] [--dynamic-loss-scale]
[--prof N] [--amp] [--seed SEED] [--gather-checkpoints]
[--raport-file RAPORT_FILE] [--evaluate] [--training-only]
[--no-checkpoints] [--checkpoint-filename CHECKPOINT_FILENAME]
[--workspace DIR] [--memory-format {nchw,nhwc}]
DIR
PyTorch ImageNet Training
positional arguments:
DIR path to dataset
optional arguments:
-h, --help show this help message and exit
--data-backend BACKEND
data backend: pytorch | synthetic | dali-gpu | dali-cpu
(default: dali-cpu)
--arch ARCH, -a ARCH model architecture: resnet18 | resnet34 | resnet50 |
resnet101 | resnet152 | resnext50-32x4d |
resnext101-32x4d | resnext101-32x8d |
resnext101-32x8d-basic | se-resnext101-32x4d (default:
resnet50)
--model-config CONF, -c CONF
model configs: classic | fanin | grp-fanin | grp-
fanout(default: classic)
-j N, --workers N number of data loading workers (default: 5)
--epochs N number of total epochs to run
--run-epochs N run only N epochs, used for checkpointing runs
-b N, --batch-size N mini-batch size (default: 256) per gpu
--optimizer-batch-size N
size of a total batch size, for simulating bigger
batches using gradient accumulation
--lr LR, --learning-rate LR
initial learning rate
--lr-schedule SCHEDULE
Type of LR schedule: step, linear, cosine
--warmup E number of warmup epochs
--label-smoothing S label smoothing
--mixup ALPHA mixup alpha
--momentum M momentum
--weight-decay W, --wd W
weight decay (default: 1e-4)
--bn-weight-decay use weight_decay on batch normalization learnable
parameters, (default: false)
--nesterov use nesterov momentum, (default: false)
--print-freq N, -p N print frequency (default: 10)
--resume PATH path to latest checkpoint (default: none)
--pretrained-from-file PATH
load weights from here
--static-loss-scale STATIC_LOSS_SCALE
Static loss scale, positive power of 2 values can
improve amp convergence.
--dynamic-loss-scale Use dynamic loss scaling. If supplied, this argument
supersedes --static-loss-scale.
--prof N Run only N iterations
--amp Run model AMP (automatic mixed precision) mode.
--seed SEED random seed used for numpy and pytorch
--gather-checkpoints Gather checkpoints throughout the training, without
this flag only best and last checkpoints will be
stored
--raport-file RAPORT_FILE
file in which to store JSON experiment raport
--evaluate evaluate checkpoint/model
--training-only do not evaluate
--no-checkpoints do not store any checkpoints, useful for benchmarking
--checkpoint-filename CHECKPOINT_FILENAME
--workspace DIR path to directory where checkpoints will be stored
--memory-format {nchw,nhwc}
memory layout, nchw or nhwc
```
### Dataset guidelines
To use your own dataset, divide it into directories as in the following scheme:
- Training images - `train/<class id>/<image>`
- Validation images - `val/<class id>/<image>`
If your dataset has a number of classes different from 1000, you need to pass the `--num_classes N` flag to the training script.
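This layout matches the torchvision `ImageFolder` convention, so a quick sanity check of a custom dataset can look like the following sketch (illustration only; the repository's data backends handle the actual loading):
```python
from torchvision import datasets, transforms

train_dataset = datasets.ImageFolder("<path to dataset>/train", transform=transforms.ToTensor())
print(len(train_dataset.classes))  # pass this value via --num_classes if it is not 1000
```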
### Training process
All results of the training will be stored in the directory specified with the `--workspace` argument.
The script will store:
- most recent checkpoint - `checkpoint.pth.tar` (unless `--no-checkpoints` flag is used).
- checkpoint with best validation accuracy - `model_best.pth.tar` (unless `--no-checkpoints` flag is used).
- JSON log - in the file specified with `--raport-file` flag.
Metrics gathered through training:
- `train.loss` - training loss
- `train.total_ips` - training speed measured in images/second
- `train.compute_ips` - training speed measured in images/second, not counting data loading
- `train.data_time` - time spent on waiting on data
- `train.compute_time` - time spent in forward/backward pass
To restart training from checkpoint use `--resume` option.
To start training from pretrained weights (e.g. downloaded from NGC) use the `--pretrained-from-file` option.
The difference between the two is that pretrained weights contain only the model weights,
while checkpoints, apart from the model weights, also contain the optimizer and LR scheduler state.
Checkpoints are suitable for splitting the training into parts, for example in order
to divide the training job into shorter stages, or to restart training after an infrastructure failure.
Pretrained weights can be used as a base for fine-tuning the model on a different dataset,
or as a backbone for detection models.
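As a conceptual sketch of the difference (the key names below are illustrative assumptions, not necessarily the exact keys written by this repository; `model`, `optimizer`, and `scheduler` are assumed to exist):
```python
import torch

# A checkpoint bundles the model weights with the training state:
torch.save(
    {"state_dict": model.state_dict(),
     "optimizer": optimizer.state_dict(),
     "lr_scheduler": scheduler.state_dict()},
    "checkpoint.pth.tar",
)

# Pretrained weights carry only the model weights:
torch.save(model.state_dict(), "weights_only.pth.tar")
```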
### Inference process
Validation is done every epoch and can also be run separately on a checkpointed model.
`python ./main.py --arch se-resnext101-32x4d --evaluate --epochs 1 --resume <path to checkpoint> -b <batch size> <path to imagenet>`
Metrics gathered during validation:
- `val.loss` - validation loss
- `val.top1` - validation top1 accuracy
- `val.top5` - validation top5 accuracy
- `val.total_ips` - inference speed measured in images/second
- `val.compute_ips` - inference speed measured in images/second, not counting data loading
- `val.data_time` - time spent on waiting on data
- `val.compute_time` - time spent on inference
To run inference on a JPEG image, you first have to extract the model weights from the checkpoint:
`python checkpoint2model.py --checkpoint-path <path to checkpoint> --weight-path <path where weights will be stored>`
Then run the classification script:
`python classify.py --arch se-resnext101-32x4d --pretrained-from-file <path to weights from previous step> --precision AMP|FP32 --image <path to JPEG image>`
You can also run ImageNet validation on pretrained weights:
`python ./main.py --arch se-resnext101-32x4d --evaluate --epochs 1 --pretrained-from-file <path to pretrained weights> -b <batch size> <path to imagenet>`
#### NGC Pretrained weights:
Pretrained weights can be downloaded from NGC:
```bash
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/seresnext101_32x4d_pyt_amp/versions/20.06.0/zip -O seresnext101_32x4d_pyt_amp_20.06.0.zip
unzip seresnext101_32x4d_pyt_amp_20.06.0.zip
```
To run inference on ImageNet, run:
`python ./main.py --arch se-resnext101-32x4d --evaluate --epochs 1 --pretrained-from-file nvidia_se-resnext101-32x4d_200821.pth.tar -b <batch size> <path to imagenet>`
To run inference on JPEG image using pretrained weights:
`python classify.py --arch se-resnext101-32x4d --pretrained-from-file nvidia_se-resnext101-32x4d_200821.pth.tar --precision AMP|FP32 --image <path to JPEG image>`
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark training, run:
* For 1 GPU
* FP32 (V100 GPUs only)
`python ./launch.py --model se-resnext101-32x4d --precision FP32 --mode benchmark_training --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* TF32 (A100 GPUs only)
`python ./launch.py --model se-resnext101-32x4d --precision TF32 --mode benchmark_training --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* AMP
`python ./launch.py --model se-resnext101-32x4d --precision AMP --mode benchmark_training --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* For multiple GPUs
* FP32 (V100 GPUs only)
`python ./multiproc.py --nproc_per_node 8 ./launch.py --model se-resnext101-32x4d --precision FP32 --mode benchmark_training --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* TF32 (A100 GPUs only)
`python ./multiproc.py --nproc_per_node 8 ./launch.py --model se-resnext101-32x4d --precision TF32 --mode benchmark_training --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* AMP
`python ./multiproc.py --nproc_per_node 8 ./launch.py --model se-resnext101-32x4d --precision AMP --mode benchmark_training --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
Each of these scripts will run 100 iterations and save results in the `benchmark.json` file.
#### Inference performance benchmark
To benchmark inference, run:
* FP32 (V100 GPUs only)
`python ./launch.py --model se-resnext101-32x4d --precision FP32 --mode benchmark_inference --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* TF32 (A100 GPUs only)
`python ./launch.py --model se-resnext101-32x4d --precision TF32 --mode benchmark_inference --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* AMP
`python ./launch.py --model se-resnext101-32x4d --precision AMP --mode benchmark_inference --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
Each of these scripts will run 100 iterations and save results in the `benchmark.json` file.
### Results
#### Training accuracy results
Our results were obtained by running the applicable training script in the pytorch-20.12 NGC container.
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
| **Epochs** | **Mixed Precision Top1** | **TF32 Top1** |
|:----------:|:------------------------:|:--------------:|
| 90 | 80.03 +/- 0.11 | 79.92 +/- 0.07 |
| 250 | 80.9 +/- 0.08 | 80.98 +/- 0.07 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
| **Epochs** | **Mixed Precision Top1** | **FP32 Top1** |
|:----------:|:------------------------:|:--------------:|
| 90 | 80.04 +/- 0.07 | 79.93 +/- 0.10 |
| 250 | 80.92 +/- 0.09 | 80.97 +/- 0.09 |
##### Example plots
The following images show a 250 epochs configuration on a DGX-1V.
![ValidationLoss](./img/loss_plot.png)
![ValidationTop1](./img/top1_plot.png)
![ValidationTop5](./img/top5_plot.png)
#### Training performance results
Our results were obtained by running the applicable training script in the pytorch-21.03 NGC container.
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
| **GPUs** | **Throughput - TF32** | **Throughput - mixed precision** | **Throughput speedup (TF32 to mixed precision)** | **TF32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **TF32 Training Time (90E)** |
|:--------:|:---------------------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:|
| 1 | 395 img/s | 855 img/s | 2.16 x | 1.0 x | 1.0 x | ~40 hours | ~86 hours |
| 8 | 2991 img/s | 5779 img/s | 1.93 x | 7.56 x | 6.75 x | ~6 hours | ~12 hours |
##### Training performance: NVIDIA DGX-1 16GB (8x V100 16GB)
| **GPUs** | **Throughput - FP32** | **Throughput - mixed precision** | **Throughput speedup (FP32 to mixed precision)** | **FP32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **FP32 Training Time (90E)** |
|:--------:|:---------------------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:|
| 1 | 132 img/s | 443 img/s | 3.34 x | 1.0 x | 1.0 x | ~76 hours | ~254 hours |
| 8 | 1004 img/s | 2971 img/s | 2.95 x | 7.57 x | 6.7 x | ~12 hours | ~34 hours |
##### Training performance: NVIDIA DGX-1 32GB (8x V100 32GB)
| **GPUs** | **Throughput - FP32** | **Throughput - mixed precision** | **Throughput speedup (FP32 to mixed precision)** | **FP32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **FP32 Training Time (90E)** |
|:--------:|:---------------------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:|
| 1 | 130 img/s | 427 img/s | 3.26 x | 1.0 x | 1.0 x | ~79 hours | ~257 hours |
| 8 | 992 img/s | 2925 img/s | 2.94 x | 7.58 x | 6.84 x | ~12 hours | ~34 hours |
#### Inference performance results
Our results were obtained by running the applicable training script in the pytorch-21.03 NGC container.
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
###### FP32 Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 40 img/s | 24.92 ms | 26.78 ms | 31.12 ms |
| 2 | 80 img/s | 24.89 ms | 27.63 ms | 30.81 ms |
| 4 | 127 img/s | 31.58 ms | 35.92 ms | 39.64 ms |
| 8 | 250 img/s | 32.29 ms | 34.5 ms | 38.14 ms |
| 16 | 363 img/s | 44.5 ms | 44.16 ms | 44.37 ms |
| 32 | 423 img/s | 76.86 ms | 75.89 ms | 76.17 ms |
| 64 | 472 img/s | 138.36 ms | 135.85 ms | 136.52 ms |
| 128 | 501 img/s | 262.64 ms | 255.48 ms | 256.02 ms |
| 256 | 508 img/s | 519.84 ms | 500.71 ms | 501.5 ms |
###### Mixed Precision Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 29 img/s | 33.83 ms | 39.1 ms | 41.57 ms |
| 2 | 58 img/s | 34.35 ms | 36.92 ms | 41.66 ms |
| 4 | 117 img/s | 34.33 ms | 38.67 ms | 41.05 ms |
| 8 | 232 img/s | 34.66 ms | 39.51 ms | 42.16 ms |
| 16 | 459 img/s | 35.23 ms | 36.77 ms | 38.11 ms |
| 32 | 871 img/s | 37.62 ms | 39.36 ms | 41.26 ms |
| 64 | 1416 img/s | 46.95 ms | 45.26 ms | 47.48 ms |
| 128 | 1533 img/s | 87.49 ms | 83.54 ms | 83.75 ms |
| 256 | 1576 img/s | 170.79 ms | 161.97 ms | 162.93 ms |
##### Inference performance: NVIDIA T4
###### FP32 Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 40 img/s | 25.12 ms | 28.83 ms | 31.59 ms |
| 2 | 75 img/s | 26.82 ms | 30.54 ms | 33.13 ms |
| 4 | 136 img/s | 29.79 ms | 33.33 ms | 37.65 ms |
| 8 | 155 img/s | 51.74 ms | 52.57 ms | 53.12 ms |
| 16 | 164 img/s | 97.99 ms | 98.76 ms | 99.21 ms |
| 32 | 173 img/s | 186.31 ms | 186.43 ms | 187.4 ms |
| 64 | 171 img/s | 378.1 ms | 377.19 ms | 378.82 ms |
| 128 | 165 img/s | 785.83 ms | 778.23 ms | 782.64 ms |
| 256 | 158 img/s | 1641.96 ms | 1601.74 ms | 1614.52 ms |
###### Mixed Precision Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 31 img/s | 32.51 ms | 37.26 ms | 39.53 ms |
| 2 | 61 img/s | 32.76 ms | 37.61 ms | 39.62 ms |
| 4 | 123 img/s | 32.98 ms | 38.97 ms | 42.66 ms |
| 8 | 262 img/s | 31.01 ms | 36.3 ms | 39.11 ms |
| 16 | 482 img/s | 33.76 ms | 34.54 ms | 38.5 ms |
| 32 | 512 img/s | 63.68 ms | 63.29 ms | 63.73 ms |
| 64 | 527 img/s | 123.57 ms | 122.69 ms | 123.56 ms |
| 128 | 525 img/s | 248.97 ms | 245.39 ms | 246.66 ms |
| 256 | 527 img/s | 496.23 ms | 485.68 ms | 488.3 ms |
## Release notes
### Changelog
1. October 2019
* Initial release
2. July 2020
* Added A100 scripts
* Updated README
3. February 2021
* Moved from APEX AMP to Native AMP
### Known issues
There are no known issues with this model.
|
PyTorch/Classification/ConvNets/triton/deployment_toolkit | deployment_toolkit | dump | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Dict, Iterable
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
def pad_except_batch_axis(data: np.ndarray, target_shape_with_batch_axis: Iterable[int]):
assert all(
[current_size <= target_size for target_size, current_size in zip(target_shape_with_batch_axis, data.shape)]
), "target_shape should have equal or greater all dimensions comparing to data.shape"
padding = [(0, 0)] + [ # (0, 0) - do not pad on batch_axis (with index 0)
(0, target_size - current_size)
for target_size, current_size in zip(target_shape_with_batch_axis[1:], data.shape[1:])
]
return np.pad(data, padding, "constant", constant_values=np.nan)
class NpzWriter:
"""
Dumps dicts of numpy arrays into npz files
It can/shall be used as a context manager:
```
with NpzWriter('mydir') as writer:
writer.write(outputs={'classes': np.zeros(8), 'probs': np.zeros((8, 4))},
labels={'classes': np.zeros(8)},
inputs={'input': np.zeros((8, 240, 240, 3))})
```
## Variable size data
Only a dynamic last axis is handled. Data is padded with the np.nan value.
Each generated file may also have a different size of the dynamic axis.
"""
def __init__(self, output_dir, compress=False):
self._output_dir = Path(output_dir)
self._items_cache: Dict[str, Dict[str, np.ndarray]] = {}
self._items_counters: Dict[str, int] = {}
self._flush_threshold_b = FLUSH_THRESHOLD_B
self._compress = compress
@property
def cache_size(self):
return {name: sum([a.nbytes for a in data.values()]) for name, data in self._items_cache.items()}
def _append_to_cache(self, prefix, data):
if data is None:
return
if not isinstance(data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
cached_data = self._items_cache.get(prefix, {})
for name, value in data.items():
assert isinstance(
value, (list, np.ndarray)
), f"Values shall be lists or np.ndarrays; current type {type(value)}"
if not isinstance(value, np.ndarray):
value = np.array(value)
assert value.dtype.kind in ["S", "U"] or not np.any(
np.isnan(value)
), f"Values with np.nan is not supported; {name}={value}"
cached_value = cached_data.get(name, None)
if cached_value is not None:
target_shape = np.max([cached_value.shape, value.shape], axis=0)
cached_value = pad_except_batch_axis(cached_value, target_shape)
value = pad_except_batch_axis(value, target_shape)
value = np.concatenate((cached_value, value))
cached_data[name] = value
self._items_cache[prefix] = cached_data
def write(self, **kwargs):
"""
Writes named dictionaries of np.ndarrays.
The keyword names will become the prefixes of the npz files in which those dictionaries are stored.
ex. writer.write(inputs={'input': np.zeros((2, 10))},
outputs={'classes': np.zeros((2,)), 'probabilities': np.zeros((2, 32))},
labels={'classes': np.zeros((2,))})
Args:
**kwargs: named list of dictionaries of np.ndarrays to store
"""
for prefix, data in kwargs.items():
self._append_to_cache(prefix, data)
biggest_item_size = max(self.cache_size.values())
if biggest_item_size > self._flush_threshold_b:
self.flush()
def flush(self):
for prefix, data in self._items_cache.items():
self._dump(prefix, data)
self._items_cache = {}
def _dump(self, prefix, data):
idx = self._items_counters.setdefault(prefix, 0)
filename = f"{prefix}-{idx:012d}.npz"
output_path = self._output_dir / filename
if self._compress:
np.savez_compressed(output_path, **data)
else:
np.savez(output_path, **data)
nitems = len(list(data.values())[0])
msg_for_labels = (
"If these are correct shapes - consider moving loading of them into metrics.py."
if prefix == "labels"
else ""
)
shapes = {name: value.shape if isinstance(value, np.ndarray) else (len(value),) for name, value in data.items()}
assert all(len(v) == nitems for v in data.values()), (
f'All items in "{prefix}" shall have same size on 0 axis equal to batch size. {msg_for_labels}'
f'{", ".join(f"{name}: {shape}" for name, shape in shapes.items())}'
)
self._items_counters[prefix] += nitems
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
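if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): dump two batches of dummy
    # outputs/labels into a fresh directory as outputs-*.npz / labels-*.npz.
    import tempfile

    dump_dir = Path(tempfile.mkdtemp()) / "npz_dump"
    with NpzWriter(dump_dir) as writer:
        for _ in range(2):
            writer.write(
                outputs={"classes": np.zeros(8), "probs": np.zeros((8, 4))},
                labels={"classes": np.zeros(8)},
            )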
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/model_analyzer | model_analyzer | exceptions | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ModelAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
|
. | DeepLearningExamples | README | # NVIDIA Deep Learning Examples for Tensor Cores
## Introduction
This repository provides State-of-the-Art Deep Learning examples that are easy to train and deploy, achieving the best reproducible accuracy and performance with NVIDIA CUDA-X software stack running on NVIDIA Volta, Turing and Ampere GPUs.
## NVIDIA GPU Cloud (NGC) Container Registry
These examples, along with our NVIDIA deep learning software stack, are provided in a monthly updated Docker container on the NGC container registry (https://ngc.nvidia.com). These containers include:
- The latest NVIDIA examples from this repository
- The latest NVIDIA contributions shared upstream to the respective framework
- The latest NVIDIA Deep Learning software libraries, such as cuDNN, NCCL, cuBLAS, etc. which have all been through a rigorous monthly quality assurance process to ensure that they provide the best possible performance
- [Monthly release notes](https://docs.nvidia.com/deeplearning/dgx/index.html#nvidia-optimized-frameworks-release-notes) for each of the NVIDIA optimized containers
## Computer Vision
| Models | Framework | AMP | Multi-GPU | Multi-Node | TensorRT | ONNX | Triton | DLC | NB |
|----------------------------------------------------------------------------------------------------------------------------------------|--------------|----------------|-----------|------------|----------|------|------------------------------------------------------------------------------------------------------------------------------|------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| [EfficientNet-B0](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/efficientnet) | PyTorch | Yes | Yes | - | Supported | - | Supported | Yes | - |
| [EfficientNet-B4](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/efficientnet) | PyTorch | Yes | Yes | - | Supported | - | Supported | Yes | - |
| [EfficientNet-WideSE-B0](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/efficientnet) | PyTorch | Yes | Yes | - | Supported | - | Supported | Yes | - |
| [EfficientNet-WideSE-B4](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/efficientnet) | PyTorch | Yes | Yes | - | Supported | - | Supported | Yes | - |
| [EfficientNet v1-B0](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Classification/ConvNets/efficientnet_v1) | TensorFlow2 | Yes | Yes | Yes | [Example](https://github.com/NVIDIA/TensorRT/tree/main/samples/python/efficientnet) | - | Supported | Yes | - |
| [EfficientNet v1-B4](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Classification/ConvNets/efficientnet_v1) | TensorFlow2 | Yes | Yes | Yes | [Example](https://github.com/NVIDIA/TensorRT/tree/main/samples/python/efficientnet) | - | Supported | Yes | - |
| [EfficientNet v2-S](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Classification/ConvNets/efficientnet_v2) | TensorFlow2 | Yes | Yes | Yes | [Example](https://github.com/NVIDIA/TensorRT/tree/main/samples/python/efficientnet) | - | Supported | Yes | - |
| [GPUNet](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/GPUNet) | PyTorch | Yes | Yes | - | Example | Yes | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/GPUNet/triton/) | Yes | - |
| [Mask R-CNN](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Segmentation/MaskRCNN) | PyTorch | Yes | Yes | - | [Example](https://github.com/NVIDIA/TensorRT/tree/main/samples/python/detectron2) | - | Supported | - | [Yes](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Segmentation/MaskRCNN/pytorch/notebooks/pytorch_MaskRCNN_pyt_train_and_inference.ipynb) |
| [Mask R-CNN](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Segmentation/MaskRCNN) | TensorFlow2 | Yes | Yes | - | [Example](https://github.com/NVIDIA/TensorRT/tree/main/samples/python/detectron2) | - | Supported | Yes | - |
| [nnUNet](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Segmentation/nnUNet) | PyTorch | Yes | Yes | - | Supported | - | Supported | Yes | - |
| [ResNet-50](https://github.com/NVIDIA/DeepLearningExamples/tree/master/MxNet/Classification/RN50v1.5) | MXNet | Yes | Yes | - | Supported | - | Supported | - | - |
| [ResNet-50](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PaddlePaddle/Classification/RN50v1.5) | PaddlePaddle | Yes | Yes | - | Example | - | Supported | - | - |
| [ResNet-50](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnet50v1.5) | PyTorch | Yes | Yes | - | Example | - | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/triton/resnet50) | Yes | - |
| [ResNet-50](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Classification/ConvNets/resnet50v1.5) | TensorFlow | Yes | Yes | - | Supported | - | Supported | Yes | - |
| [ResNeXt-101](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnext101-32x4d) | PyTorch | Yes | Yes | - | Example | - | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/triton/resnext101-32x4d) | Yes | - |
| [ResNeXt-101](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Classification/ConvNets/resnext101-32x4d) | TensorFlow | Yes | Yes | - | Supported | - | Supported | Yes | - |
| [SE-ResNeXt-101](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/se-resnext101-32x4d) | PyTorch | Yes | Yes | - | Example | - | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/triton/se-resnext101-32x4d) | Yes | - |
| [SE-ResNeXt-101](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Classification/ConvNets/se-resnext101-32x4d) | TensorFlow | Yes | Yes | - | Supported | - | Supported | Yes | - |
| [SSD](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Detection/SSD) | PyTorch | Yes | Yes | - | Supported | - | Supported | - | [Yes](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Detection/SSD/examples/inference.ipynb) |
| [SSD](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Detection/SSD) | TensorFlow | Yes | Yes | - | Supported | - | Supported | Yes | [Yes](https://github.com/NVIDIA/DeepLearningExamples/blob/master/TensorFlow/Detection/SSD/models/research/object_detection/object_detection_tutorial.ipynb) |
| [U-Net Med](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Segmentation/UNet_Medical) | TensorFlow2 | Yes | Yes | - | Example | - | Supported | Yes | - |
## Natural Language Processing
| Models | Framework | AMP | Multi-GPU | Multi-Node | TensorRT | ONNX | Triton | DLC | NB |
|------------------------------------------------------------------------------------------------------------------------|-------------|------|-----------|------------|----------|------|-----------------------------------------------------------------------------------------------------------|------|---------------------------------------------------------------------------------------------------------------------------------------------|
| [BERT](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/BERT) | PyTorch | Yes | Yes | Yes | [Example](https://github.com/NVIDIA/TensorRT/tree/main/demo/BERT) | - | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/BERT/triton) | Yes | - |
| [GNMT](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Translation/GNMT) | PyTorch | Yes | Yes | - | Supported | - | Supported | - | - |
| [ELECTRA](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/LanguageModeling/ELECTRA) | TensorFlow2 | Yes | Yes | Yes | Supported | - | Supported | Yes | - |
| [BERT](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/LanguageModeling/BERT) | TensorFlow | Yes | Yes | Yes | Example | - | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/LanguageModeling/BERT/triton) | Yes | [Yes](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/LanguageModeling/BERT/notebooks) |
| [BERT](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/LanguageModeling/BERT) | TensorFlow2 | Yes | Yes | Yes | Supported | - | Supported | Yes | - |
| [GNMT](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Translation/GNMT) | TensorFlow | Yes | Yes | - | Supported | - | Supported | - | - |
| [Faster Transformer](https://github.com/NVIDIA/DeepLearningExamples/tree/master/FasterTransformer) | Tensorflow | - | - | - | Example | - | Supported | - | - |
## Recommender Systems
| Models | Framework | AMP | Multi-GPU | Multi-Node | ONNX | Triton | DLC | NB |
|----------------------------------------------------------------------------------------------------------------|-------------|-------|-----------|--------------|--------|------------------------------------------------------------------------------------------------------|------|--------------------------------------------------------------------------------------------------------|
| [DLRM](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Recommendation/DLRM) | PyTorch | Yes | Yes | - | Yes | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Recommendation/DLRM/triton) | Yes | [Yes](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Recommendation/DLRM/notebooks) |
| [DLRM](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Recommendation/DLRM) | TensorFlow2 | Yes | Yes | Yes | - | Supported | Yes | - |
| [NCF](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Recommendation/NCF) | PyTorch | Yes | Yes | - | - | Supported | - | - |
| [Wide&Deep](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Recommendation/WideAndDeep) | TensorFlow | Yes | Yes | - | - | Supported | Yes | - |
| [Wide&Deep](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Recommendation/WideAndDeep) | TensorFlow2 | Yes | Yes | - | - | Supported | Yes | - |
| [NCF](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Recommendation/NCF) | TensorFlow | Yes | Yes | - | - | Supported | Yes | - |
| [VAE-CF](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Recommendation/VAE-CF) | TensorFlow | Yes | Yes | - | - | Supported | - | - |
| [SIM](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow2/Recommendation/SIM) | TensorFlow2 | Yes | Yes | - | - | Supported | Yes | - |
## Speech to Text
| Models | Framework | AMP | Multi-GPU | Multi-Node | TensorRT | ONNX | Triton | DLC | NB |
|--------------------------------------------------------------------------------------------------------------|-------------|------|------------|--------------|----------|--------|----------------------------------------------------------------------------------------------------------|-------|--------------------------------------------------------------------------------------------------------------|
| [Jasper](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechRecognition/Jasper) | PyTorch | Yes | Yes | - | Example | Yes | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechRecognition/Jasper/trtis) | Yes | [Yes](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechRecognition/Jasper/notebooks) |
| [QuartzNet](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechRecognition/QuartzNet) | PyTorch | Yes | Yes | - | Supported | - | Supported | Yes | - |
## Text to Speech
| Models | Framework | AMP | Multi-GPU | Multi-Node | TensorRT | ONNX | Triton | DLC | NB |
|-------------------------------------------------------------------------------------------------------------------------|-------------|------|------------|-------------|----------|--------|---------------------------------------------------------------------------------------------------------------|-------|-----|
| [FastPitch](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch) | PyTorch | Yes | Yes | - | Example | - | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/FastPitch/triton) | Yes | Yes |
| [FastSpeech](https://github.com/NVIDIA/DeepLearningExamples/tree/master/CUDA-Optimized/FastSpeech) | PyTorch | Yes | Yes | - | Example | - | Supported | - | - |
| [Tacotron 2 and WaveGlow](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2) | PyTorch | Yes | Yes | - | Example | Yes | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp) | Yes | - |
| [HiFi-GAN](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/HiFiGAN) | PyTorch | Yes | Yes | - | Supported | - | Supported | Yes | - |
## Graph Neural Networks
| Models | Framework | AMP | Multi-GPU | Multi-Node | ONNX | Triton | DLC | NB |
|-------------------------------------------------------------------------------------------------------------------------|------------|------|------------|--------------|--------|----------|------|------|
| [SE(3)-Transformer](https://github.com/NVIDIA/DeepLearningExamples/tree/master/DGLPyTorch/DrugDiscovery/SE3Transformer) | PyTorch | Yes | Yes | - | - | Supported | - | - |
| [MoFlow](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/DrugDiscovery/MoFlow) | PyTorch | Yes | Yes | - | - | Supported | - | - |
## Time-Series Forecasting
| Models | Framework | AMP | Multi-GPU | Multi-Node | TensorRT | ONNX | Triton | DLC | NB |
|-------------------------------------------------------------------------------------------------------------------|------------|------|-------------|--------------|----------|--------|--------------------------------------------------------------------------------------------------|-------|-----|
| [Temporal Fusion Transformer](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Forecasting/TFT) | PyTorch | Yes | Yes | - | Example | Yes | [Example](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Forecasting/TFT/triton) | Yes | - |
## NVIDIA support
In each of the network READMEs, we indicate the level of support that will be provided. The range is from ongoing updates and improvements to a point-in-time release for thought leadership.
## Glossary
**Multinode Training**
Supported on a pyxis/enroot Slurm cluster.
**Deep Learning Compiler (DLC)**
TensorFlow XLA and PyTorch JIT and/or TorchScript
**Accelerated Linear Algebra (XLA)**
XLA is a domain-specific compiler for linear algebra that can accelerate TensorFlow models with potentially no source code changes. The results are improvements in speed and memory usage.
**PyTorch JIT and/or TorchScript**
TorchScript is a way to create serializable and optimizable models from PyTorch code. It is an intermediate representation of a PyTorch model (a subclass of nn.Module) that can then be run in a high-performance environment such as C++.
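For example, a small module can be compiled to TorchScript and serialized for use outside Python (a minimal sketch, not tied to any particular model in this repository):
```python
import torch

class TwoLayer(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(16, 4)

    def forward(self, x):
        return torch.relu(self.fc(x))

scripted = torch.jit.script(TwoLayer())  # TorchScript IR, independent of Python
scripted.save("two_layer.pt")            # loadable from C++ via torch::jit::load
```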
**Automatic Mixed Precision (AMP)**
Automatic Mixed Precision (AMP) enables mixed precision training on Volta, Turing, and NVIDIA Ampere GPU architectures automatically.
**TensorFloat-32 (TF32)**
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
**Jupyter Notebooks (NB)**
The Jupyter Notebook is an open-source web application that allows you to create and share documents that contain live code, equations, visualizations and narrative text.
## Feedback / Contributions
We're posting these examples on GitHub to better support the community, facilitate feedback, as well as collect and implement contributions using GitHub Issues and pull requests. We welcome all contributions!
## Known issues
In each of the network READMEs, we indicate any known issues and encourage the community to provide feedback.
|
PyTorch/SpeechRecognition/wav2vec2/common | common | optimizers | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from common.fairseq.optim.adam import FairseqAdam
from common.fairseq.optim.fp16_optimizer import FP16Optimizer
from common.fairseq.optim.fused_adam import get_fused_adam_class
from common.utils import print_once
def lr_poly_policy(step, optimizer, lr, initial_lr_scale=0.0,
final_lr_scale=0.0, warmup_steps=1000, hold_steps=0,
num_steps=None, power=1.0):
"""Polynomial decay LR policy with an optional hold period."""
assert step >= 1
assert num_steps is not None
assert power is not None
start_lr = initial_lr_scale * lr
end_lr = final_lr_scale * lr
if step <= warmup_steps:
new_lr = start_lr + (step) / warmup_steps * (lr - start_lr)
elif step <= warmup_steps + hold_steps:
new_lr = lr
elif warmup_steps + hold_steps < step <= num_steps:
remain = 1 - (step - warmup_steps) / (num_steps - warmup_steps)
new_lr = (lr - end_lr) * remain ** power + end_lr
else:
new_lr = end_lr
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def lr_exp_policy(step, optimizer, initial_lr_scale, lr, final_lr_scale=0.0,
warmup_steps=1000, hold_steps=0, num_steps=float('inf'),
decay=None):
"""Exponential LR policy with an optional hold period.
    If the `decay` factor is not supplied, it is calculated so that the LR
    reaches `end_lr` after `num_steps` steps.
Args:
num_steps (int): Limits the number of decay steps.
        end_lr (float): The lowest possible LR, equal to `final_lr_scale * lr`.
        decay (float or None): Decay factor; if None, it will be derived
            from `num_steps` and `end_lr`.
"""
assert step >= 1
start_lr = initial_lr_scale * lr
end_lr = final_lr_scale * lr
if decay is None:
assert not math.isinf(num_steps) and end_lr > 0.0
decay_steps = num_steps - warmup_steps - hold_steps
decay = math.log(end_lr / lr) / decay_steps
else:
decay = math.log(decay)
if step <= warmup_steps:
new_lr = start_lr + (step) / warmup_steps * (lr - start_lr)
elif step <= warmup_steps + hold_steps:
new_lr = lr
else:
a = math.exp(decay * (min(step, num_steps) - warmup_steps - hold_steps))
new_lr = max(a * lr, end_lr)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def get_optimizer(model, args):
kw = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == 'adam' and (args.fp16 or args.bf16):
print_once('WARNING: Using Fairseq FP16Optimizer')
# based on fairseq.optim.FP16Optimizer.build_optimizer
flatten = True # not args.fp16_no_flatten_grads
args.betas = args.adam_betas
args.eps = args.adam_eps
params = list(filter(lambda p: p.requires_grad, model.parameters()))
fp32_params = FP16Optimizer.build_fp32_params(args, params,
flatten=flatten)
# based on fairseq.optim.build_optimizer
def build_optimizer(cfg, params, *extra_args, **extra_kwargs):
if all(isinstance(p, dict) for p in params):
params = [t for p in params for t in p.values()]
params = list(filter(lambda p: p.requires_grad, params))
return FairseqAdam(cfg, params, *extra_args, **extra_kwargs)
if flatten:
fp32_optimizer = build_optimizer(args, [fp32_params])
else:
fp32_optimizer = build_optimizer(args, fp32_params)
if flatten and not fp32_optimizer.supports_flat_params:
raise RuntimeError(
f"chosen optimizer {fp32_optimizer.__class__.__name__} does "
"not support flat params, please set --fp16-no-flatten-grads"
)
kwargs = {}
optimizer = FP16Optimizer(args, params, fp32_optimizer, fp32_params,
**kwargs)
elif args.optimizer == 'adam' and not (args.fp16 or args.bf16):
print_once('WARNING: Using FusedAdam instead of Adam')
kw.update({'betas': args.adam_betas, 'eps': args.adam_eps})
fused_adam_cls = get_fused_adam_class()
optimizer = fused_adam_cls(model.parameters(), **kw)
else:
raise ValueError(f'Invalid optimizer "{args.optimizer}"')
return optimizer
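# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates how `lr_poly_policy` above could be stepped once per optimizer
# update; the toy model and hyperparameter values are assumptions for this
# sketch only, not the values used by the wav2vec 2.0 recipes.
if __name__ == '__main__':
    import torch
    toy_model = torch.nn.Linear(8, 8)
    toy_opt = torch.optim.SGD(toy_model.parameters(), lr=0.0)
    for step in range(1, 2001):
        lr_poly_policy(step, toy_opt, lr=5e-4, warmup_steps=100,
                       hold_steps=0, num_steps=2000, power=2.0)
        # forward / backward / toy_opt.step() would go here
    print('final lr:', toy_opt.param_groups[0]['lr'])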
|
TensorFlow/LanguageModeling/BERT/data | data | README | Steps to reproduce datasets from web
1) Build the container
* docker build -t bert_tf .
2) Run the container interactively
* nvidia-docker run -it --ipc=host bert_tf
* Optional: Mount data volumes
* -v yourpath:/workspace/bert/data/wikipedia_corpus/download
* -v yourpath:/workspace/bert/data/wikipedia_corpus/extracted_articles
* -v yourpath:/workspace/bert/data/wikipedia_corpus/raw_data
* -v yourpath:/workspace/bert/data/wikipedia_corpus/intermediate_files
* -v yourpath:/workspace/bert/data/wikipedia_corpus/final_text_file_single
* -v yourpath:/workspace/bert/data/wikipedia_corpus/final_text_files_sharded
* -v yourpath:/workspace/bert/data/wikipedia_corpus/final_tfrecords_sharded
* -v yourpath:/workspace/bert/data/bookcorpus/download
* -v yourpath:/workspace/bert/data/bookcorpus/final_text_file_single
* -v yourpath:/workspace/bert/data/bookcorpus/final_text_files_sharded
* -v yourpath:/workspace/bert/data/bookcorpus/final_tfrecords_sharded
* Optional: Select visible GPUs
* -e CUDA_VISIBLE_DEVICES=0
**Inside of the container starting here**
3) Download pretrained weights (they contain vocab files for preprocessing)
* cd data/pretrained_models_google && python3 download_models.py
4) "One-click" SQuAD download
* cd /workspace/bert/data/squad && . squad_download.sh
5) "One-click" Wikipedia data download and prep (provides tfrecords)
* Set your configuration in data/wikipedia_corpus/config.sh
* cd /data/wikipedia_corpus && ./run_preprocessing.sh
6) "One-click" BookCorpus data download and prep (provided tfrecords)
* Set your configuration in data/wikipedia_corpus/config.sh
* cd /data/bookcorpus && ./run_preprocessing.sh
|
TensorFlow/Detection/SSD/models/research/object_detection/models | models | ssd_mobilenet_v1_ppn_feature_extractor | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SSDFeatureExtractor for MobilenetV1 PPN features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import mobilenet_v1
slim = tf.contrib.slim
class SSDMobileNetV1PpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""SSD Feature Extractor using MobilenetV1 PPN features."""
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
"""
preprocessed_inputs = shape_utils.check_min_image_dim(
33, preprocessed_inputs)
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(
is_training=None, regularize_depthwise=True)):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams
else context_manager.IdentityContextManager()):
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
feature_maps = feature_map_generators.pooling_pyramid_feature_maps(
base_feature_map_depth=0,
num_layers=6,
image_features={
'image_features': image_features['Conv2d_11_pointwise']
})
return feature_maps.values()
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2 | tacotron2 | tacotron2Builder | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "tacotron2Builder.h"
#include "decoderBuilderPlain.h"
#include "decoderBuilderPlugins.h"
#include "encoderBuilder.h"
#include "jsonModelImporter.h"
#include "postNetBuilder.h"
#include "utils.h"
#include <iostream>
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const int NUM_EMBEDDING_DIMENSIONS = 512;
constexpr const int NUM_ENCODING_DIMENSIONS = 512;
constexpr const int NUM_ATTENTION_DIMENSIONS = 128;
constexpr const int MEL_CHANNELS = 80;
constexpr const int MAX_MEL_CHUNK = 80;
constexpr const int TOTAL_CHUNK_SIZE = MEL_CHANNELS * MAX_MEL_CHUNK;
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
Tacotron2Builder::Tacotron2Builder(const std::string& modelFilePath)
: mModelFilePath(modelFilePath)
, mMelChannels(MEL_CHANNELS)
{
// do nothing
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
std::vector<TRTPtr<ICudaEngine>> Tacotron2Builder::build(
const int maxInputLength,
IBuilder& builder,
const int maxBatchSize,
const bool useFP16)
{
// configure tensor-rt objects
std::unique_ptr<IModelImporter> importer;
if (Utils::hasExtension(mModelFilePath, ".json"))
{
importer.reset(new JSONModelImporter(mModelFilePath));
}
else
{
throw std::runtime_error("Unrecognized model filename type: '" + mModelFilePath + "'");
}
std::vector<TRTPtr<ICudaEngine>> engines;
EncoderBuilder encoderBuilder(
NUM_EMBEDDING_DIMENSIONS, NUM_ENCODING_DIMENSIONS, NUM_ATTENTION_DIMENSIONS, maxInputLength);
engines.emplace_back(encoderBuilder.build(builder, *importer, maxBatchSize, useFP16));
DecoderBuilderPlain decoderBuilderPlain(maxInputLength, NUM_EMBEDDING_DIMENSIONS, mMelChannels);
engines.emplace_back(decoderBuilderPlain.build(builder, *importer, maxBatchSize, useFP16));
DecoderBuilderPlugins decoderBuilderPlugins(NUM_EMBEDDING_DIMENSIONS, mMelChannels);
engines.emplace_back(decoderBuilderPlugins.build(builder, *importer, 1, 1, maxInputLength, useFP16));
PostNetBuilder postnetBuilder(mMelChannels, MAX_MEL_CHUNK, NUM_EMBEDDING_DIMENSIONS);
engines.emplace_back(postnetBuilder.build(builder, *importer, maxBatchSize, useFP16));
return engines;
}
} // namespace tts
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner | runner | __main__ | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .executor import Executor
from .finalizer import ExperimentFinalizer
from .maintainer import DockerMaintainer
from .preparer import ExperimentPreparer
from .runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
"""
Experiment Runner proxy for runner wrapper
"""
maintainer_cls = DockerMaintainer
executor_cls = Executor
preparer_cls = ExperimentPreparer
finalizer_cls = ExperimentFinalizer
def execute(config_path: str, devices: List[str]):
if len(devices) == 0:
devices = ["0"]
config = Config.from_file(config_path)
runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
runner.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
parser.add_argument(
"--devices", type=str, nargs="*", required=False, help="Path to configuration file with details."
)
args = parser.parse_args()
config_path = args.config_path
devices = args.devices
    execute(config_path, devices)
|
DGLPyTorch/DrugDiscovery/SE3Transformer/scripts | scripts | benchmark_train | #!/usr/bin/env bash
# Script to benchmark single-GPU training performance, with bases precomputation
# CLI args with defaults
BATCH_SIZE=${1:-240}
AMP=${2:-true}
CUDA_VISIBLE_DEVICES=0 python -m se3_transformer.runtime.training \
--amp "$AMP" \
--batch_size "$BATCH_SIZE" \
--epochs 16 \
--use_layer_norm \
--norm \
--save_ckpt_path model_qm9.pth \
--task homo \
--precompute_bases \
--seed 42 \
--benchmark
|
PyTorch/Segmentation/nnUNet | nnUNet | main | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from data_loading.data_module import DataModule
from nnunet.nn_unet import NNUnet
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, ModelSummary, RichProgressBar
from pytorch_lightning.plugins.io import AsyncCheckpointIO
from pytorch_lightning.strategies import DDPStrategy
from utils.args import get_main_args
from utils.logger import LoggingCallback
from utils.utils import make_empty_dir, set_cuda_devices, set_granularity, verify_ckpt_path
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
def get_trainer(args, callbacks):
return Trainer(
logger=False,
default_root_dir=args.results,
benchmark=True,
deterministic=False,
max_epochs=args.epochs,
precision=16 if args.amp else 32,
gradient_clip_val=args.gradient_clip_val,
enable_checkpointing=args.save_ckpt,
callbacks=callbacks,
num_sanity_val_steps=0,
accelerator="gpu",
devices=args.gpus,
num_nodes=args.nodes,
plugins=[AsyncCheckpointIO()],
strategy=DDPStrategy(
find_unused_parameters=False,
static_graph=True,
gradient_as_bucket_view=True,
),
limit_train_batches=1.0 if args.train_batches == 0 else args.train_batches,
limit_val_batches=1.0 if args.test_batches == 0 else args.test_batches,
limit_test_batches=1.0 if args.test_batches == 0 else args.test_batches,
)
def main():
args = get_main_args()
set_granularity()
set_cuda_devices(args)
if args.seed is not None:
seed_everything(args.seed)
data_module = DataModule(args)
data_module.setup()
ckpt_path = verify_ckpt_path(args)
if ckpt_path is not None:
model = NNUnet.load_from_checkpoint(ckpt_path, strict=False, args=args)
else:
model = NNUnet(args)
callbacks = [RichProgressBar(), ModelSummary(max_depth=2)]
if args.benchmark:
batch_size = args.batch_size if args.exec_mode == "train" else args.val_batch_size
filnename = args.logname if args.logname is not None else "perf.json"
callbacks.append(
LoggingCallback(
log_dir=args.results,
filnename=filnename,
global_batch_size=batch_size * args.gpus * args.nodes,
mode=args.exec_mode,
warmup=args.warmup,
dim=args.dim,
)
)
elif args.exec_mode == "train":
if args.save_ckpt:
callbacks.append(
ModelCheckpoint(
dirpath=f"{args.ckpt_store_dir}/checkpoints",
filename="{epoch}-{dice:.2f}",
monitor="dice",
mode="max",
save_last=True,
)
)
trainer = get_trainer(args, callbacks)
if args.benchmark:
if args.exec_mode == "train":
trainer.fit(model, train_dataloaders=data_module.train_dataloader())
else:
# warmup
trainer.test(model, dataloaders=data_module.test_dataloader(), verbose=False)
# benchmark run
model.start_benchmark = 1
trainer.test(model, dataloaders=data_module.test_dataloader(), verbose=False)
elif args.exec_mode == "train":
trainer.fit(model, datamodule=data_module)
elif args.exec_mode == "evaluate":
trainer.validate(model, dataloaders=data_module.val_dataloader())
elif args.exec_mode == "predict":
if args.save_preds:
ckpt_name = "_".join(args.ckpt_path.split("/")[-1].split(".")[:-1])
dir_name = f"predictions_{ckpt_name}"
dir_name += f"_task={model.args.task}_fold={model.args.fold}"
if args.tta:
dir_name += "_tta"
save_dir = os.path.join(args.results, dir_name)
model.save_dir = save_dir
make_empty_dir(save_dir)
model.args = args
trainer.test(model, dataloaders=data_module.test_dataloader())
if __name__ == "__main__":
main()
|
PyTorch/SpeechSynthesis/Tacotron2/filelists | filelists | ljs_mel_text_train_subset_625_filelist | LJSpeech-1.1/mels/LJ040-0100.pt|she would sometimes take Lee with her, apparently leaving him alone in the car while she transacted her business.
LJSpeech-1.1/mels/LJ011-0248.pt|Howard, strange to say, making no attempt to detain him; probably because Mullay promised to return a few days later, and to bring more money.
LJSpeech-1.1/mels/LJ016-0442.pt|made a determined effort to burn himself to death by throwing himself bodily on to the fire in the condemned ward.
LJSpeech-1.1/mels/LJ026-0036.pt|and then a balance must be struck and the doubtful form placed in the kingdom with which it has, on the whole, most points in common.
LJSpeech-1.1/mels/LJ042-0176.pt|One offers oppression, the other poverty. Both offer imperialistic injustice, tinted with two brands of slavery, end quote.
LJSpeech-1.1/mels/LJ003-0323.pt|Drunkenness, if it ever occurred, should be visited with severe punishment;
LJSpeech-1.1/mels/LJ045-0161.pt|He was upset over the fact that I would not answer him.
LJSpeech-1.1/mels/LJ028-0187.pt|Cyrus decided that Babylon must be taken.
LJSpeech-1.1/mels/LJ037-0178.pt|or one used Remington-Peters cartridge case, which may have been in the revolver before the shooting,
LJSpeech-1.1/mels/LJ010-0164.pt|Oxford, who was only nineteen at the time his offense was committed, had been born at Birmingham,
LJSpeech-1.1/mels/LJ019-0178.pt|and abandoned because of the expense. As to the entire reconstruction of Newgate, nothing had been done as yet.
LJSpeech-1.1/mels/LJ050-0117.pt|particularly those arising from organized groups, within their special jurisdiction.
LJSpeech-1.1/mels/LJ033-0128.pt|that the bag Oswald carried contained the assassination weapon and has concluded that Frazier and Randle are mistaken as to the length of the bag.
LJSpeech-1.1/mels/LJ007-0179.pt|defeats the ends of justice, and disgraces the profession of a Christian country.
LJSpeech-1.1/mels/LJ033-0067.pt|She pointed to the blanket which was on the floor very close to where Ruth Paine was standing.
LJSpeech-1.1/mels/LJ004-0139.pt|"In the morning the stench and heat were so oppressive that he and every one else on waking rushed unclothed into the yard;"
LJSpeech-1.1/mels/LJ009-0208.pt|erected on the cart, about four feet high at the head, and gradually sloping towards the horse, giving a full view of the body,
LJSpeech-1.1/mels/LJ012-0144.pt|and passed it on to Solomons by his daughter, a widow named Abrahams.
LJSpeech-1.1/mels/LJ001-0020.pt|the "lower-case" being in fact invented in the early Middle Ages.
LJSpeech-1.1/mels/LJ014-0227.pt|One of these was Mobbs, who lived in the Minories,
LJSpeech-1.1/mels/LJ040-0146.pt|He noted that Lee liked to give the impression that he did not care for other people but preferred to keep to himself,
LJSpeech-1.1/mels/LJ001-0149.pt|From the time when books first took their present shape till the end of the sixteenth century, or indeed later,
LJSpeech-1.1/mels/LJ002-0143.pt|The commissioners who presided were, quote, little otherwise than self-elected
LJSpeech-1.1/mels/LJ014-0217.pt|Dwyer managed to overpower his assailant, and got to his feet; but Cannon butted at him with his head, and again threw him to the ground,
LJSpeech-1.1/mels/LJ005-0250.pt|The prisoners were crowded together in the jail, contrary to the requirements of the four George the fourth
LJSpeech-1.1/mels/LJ042-0049.pt|I never believed I would find more material advantages at this stage of development in the Soviet Union than I might of had in the U.S.
LJSpeech-1.1/mels/LJ014-0198.pt|Marley at his trial was undefended, and the sheriffs offered him counsel; but he declined. The witnesses against him all spoke the truth, he said;
LJSpeech-1.1/mels/LJ034-0093.pt|Brennan also testified that Lee Harvey Oswald,
LJSpeech-1.1/mels/LJ016-0237.pt|With Calcraft's method there were undoubtedly many failures, and it was a common custom for him to go below the gallows
LJSpeech-1.1/mels/LJ015-0156.pt|Down at Weybridge, where he had a country place, his name was long remembered with gratitude by the poor.
LJSpeech-1.1/mels/LJ018-0047.pt|He adhered to this almost to the very last. His case had been warmly espoused by the Society for the Protection of Germans in this country,
LJSpeech-1.1/mels/LJ013-0020.pt|he acted in a manner which excited the suspicions of the crew.
LJSpeech-1.1/mels/LJ002-0041.pt|Two other wards were appropriated to the master's side debtors; they were each twenty-three feet by fourteen and a half,
LJSpeech-1.1/mels/LJ008-0227.pt|slipshod and slovenly, in crushed bonnet and dirty shawl, the gown fastened by a single hook,
LJSpeech-1.1/mels/LJ007-0029.pt|The condition of the capitally-convicted prisoners after sentence was still very disgraceful. The side they occupied, still known as the press-yard,
LJSpeech-1.1/mels/LJ018-0358.pt|Christina Edmunds had resort to strychnia, the same lethal drug that Palmer used;
LJSpeech-1.1/mels/LJ007-0198.pt|The windows were to be glazed and painted to prevent prisoners from looking out;
LJSpeech-1.1/mels/LJ043-0032.pt|After about a two-week separation, Marina Oswald returned to her husband.
LJSpeech-1.1/mels/LJ035-0071.pt|At a given signal, they reenacted the event. Baker's movements were timed with a stopwatch.
LJSpeech-1.1/mels/LJ009-0092.pt|his legs give way, he utters a faint groan, and sinks on the floor.
LJSpeech-1.1/mels/LJ019-0310.pt|which had long been admitted as indispensable, and had never as yet been properly obtained.
LJSpeech-1.1/mels/LJ038-0071.pt|When he entered the homicide and robbery bureau office, he saw two detectives standing there with Sgt. Gerald L. Hill,
LJSpeech-1.1/mels/LJ014-0291.pt|he showed symptoms of delirium tremens, and admitted that he had been addicted to the excessive use of stimulants.
LJSpeech-1.1/mels/LJ014-0283.pt|The jury found him guilty of the latter only, with a point of law reserved. This was fully argued before three judges,
LJSpeech-1.1/mels/LJ021-0096.pt|under the able and energetic leadership of General Johnson.
LJSpeech-1.1/mels/LJ045-0075.pt|She was, quote, sorry that I had not married him (the Russian boyfriend) instead, that it would have been much easier for me, end quote.
LJSpeech-1.1/mels/LJ022-0203.pt|For that we can be thankful to the God who watches over America.
LJSpeech-1.1/mels/LJ029-0073.pt|that the President would arrive and depart from Dallas' Love Field; that a motorcade through the downtown area of Dallas to the luncheon site should be arranged;
LJSpeech-1.1/mels/LJ040-0187.pt|According to Sokolow, this indicated a, quote, present intellectual functioning in the upper range of bright normal intelligence, end quote.
LJSpeech-1.1/mels/LJ016-0101.pt|One of the three, shamming ill, remained all day in his ward, where he employed himself unraveling the rope from the sleeping-mats.
LJSpeech-1.1/mels/LJ015-0086.pt|He kept open house at Kilburn Priory;
LJSpeech-1.1/mels/LJ028-0427.pt|The enormous amount of debris which buried the palaces and temples and walls of Nebuchadnezzar's city, in places to the depth of a hundred feet,
LJSpeech-1.1/mels/LJ048-0248.pt|President Kennedy was scheduled to speak across the street from his hotel in Fort Worth at eight:thirty a.m.
LJSpeech-1.1/mels/LJ021-0095.pt|We are now prepared to move into this second phase, on the basis of our experience in the first phase
LJSpeech-1.1/mels/LJ030-0081.pt|They were instructed to watch particularly for thrown objects, sudden actions in the crowd, and any movements toward the Presidential car.
LJSpeech-1.1/mels/LJ032-0176.pt|Moreover, the bus transfer which he obtained as he left the bus was still in the pocket when he was arrested.
LJSpeech-1.1/mels/LJ044-0129.pt|and often it is advisable for some people to remain in the background, not underground, end quote.
LJSpeech-1.1/mels/LJ018-0177.pt|But as there was no independent corroboration of the informer's evidence, according to the custom of the British law,
LJSpeech-1.1/mels/LJ049-0113.pt|This point was ably made in the nineteen oh two debate by Senator George F. Hoar, the sponsor of the Senate bill, quote,
LJSpeech-1.1/mels/LJ050-0141.pt|As a beginning step to improve liaison with local law enforcement officials, the Secret Service on August twenty-six, nineteen sixty-four,
LJSpeech-1.1/mels/LJ013-0156.pt|a scion of the ducal house of Bedford, by his confidential valet and personal attendant.
LJSpeech-1.1/mels/LJ032-0222.pt|Moreover, Shaneyfelt testified that in his opinion the photographs were not composites of two different photographs
LJSpeech-1.1/mels/LJ004-0052.pt|which Howard had eulogized some forty years before.
LJSpeech-1.1/mels/LJ006-0017.pt|with those who made the selection of the first inspectors, and the two gentlemen appointed were probably the most fitted in England to be so employed.
LJSpeech-1.1/mels/LJ049-0046.pt|Even so, analysis of the motion picture films taken by amateur photographer Zapruder
LJSpeech-1.1/mels/LJ017-0124.pt|He frequently declared before and during the trial that it would be impossible to find him guilty.
LJSpeech-1.1/mels/LJ048-0150.pt|while the Secret Service representatives in Dallas
LJSpeech-1.1/mels/LJ017-0082.pt|He fixed upon a sporting friend, Mr. John Parsons Cook, who had been in luck at Shrewsbury races, both as a winner and a backer,
LJSpeech-1.1/mels/LJ041-0095.pt|Oswald read a good deal, said Powers, but, quote, he would never be reading any of the shoot-em-up westerns or anything like that.
LJSpeech-1.1/mels/LJ002-0089.pt|eight. The female felons were deprived of part of the space which the architect had intended for them.
LJSpeech-1.1/mels/LJ050-0264.pt|The Commission recommends that the present arrangements
LJSpeech-1.1/mels/LJ039-0177.pt|was greater than from the second to the third shot and required a movement in the basic firing position of the marksmen.
LJSpeech-1.1/mels/LJ047-0016.pt|The FBI opened a file on Oswald in October nineteen fifty-nine, when news reports appeared of his defection to the Soviet Union.
LJSpeech-1.1/mels/LJ028-0036.pt|But in those very early days Babylon was little more than a shrine, surrounded with mud huts and date palms.
LJSpeech-1.1/mels/LJ013-0173.pt|The researches of the police soon laid bare other suspicious facts.
LJSpeech-1.1/mels/LJ014-0138.pt|Mrs. Manning became still more violent, shouting, "No, no, I will not stand it! You ought to be ashamed of yourselves!"
LJSpeech-1.1/mels/LJ028-0165.pt|There is, however, a second inner wall, of less thickness than the first, but very little inferior to it in strength.
LJSpeech-1.1/mels/LJ006-0048.pt|To these were still added an average of about fifty expecting the last penalty of the law; a certain number of transports awaiting removal to the colonies;
LJSpeech-1.1/mels/LJ032-0133.pt|Lieutenant Day of the Dallas Police Department had "lifted" a palmprint from the underside of the gun barrel
LJSpeech-1.1/mels/LJ038-0093.pt|Frequently, however, he was confronted with evidence which he could not explain, and he resorted to statements which are known to be lies.
LJSpeech-1.1/mels/LJ018-0228.pt|Five or six years later, William Roupell minutely described how he had effected the fraud.
LJSpeech-1.1/mels/LJ046-0084.pt|for the President soon after the assassination, quote,
LJSpeech-1.1/mels/LJ033-0109.pt|the Commission has carefully considered the testimony of these two witnesses with regard to the length of the bag.
LJSpeech-1.1/mels/LJ013-0158.pt|One morning in May his lordship was found dead in his bed with his throat cut.
LJSpeech-1.1/mels/LJ036-0111.pt|Whaley's memory of the lineup is inaccurate. There were four men altogether, not six men, in the lineup with Oswald.
LJSpeech-1.1/mels/LJ044-0082.pt|His attempt to express himself through his Fair Play for Cuba activities, however,
LJSpeech-1.1/mels/LJ036-0208.pt|white male, approximately thirty, slender build, height five foot ten inches, weight one hundred sixty-five pounds, end quote.
LJSpeech-1.1/mels/LJ038-0255.pt|Firearms identification.
LJSpeech-1.1/mels/LJ031-0111.pt|The elliptical wound in the Governor's back, located slightly to the left of the Governor's right armpit approximately five-eighths inch (a centimeter and a half)
LJSpeech-1.1/mels/LJ006-0246.pt|On another occasion a young man, who was being violently teased, seized a knife and stabbed his tormentor in the back.
LJSpeech-1.1/mels/LJ027-0167.pt|Then the gills gradually dry up, as the lungs develop, and they now breathe wholly by lungs, but still retain the tail.
LJSpeech-1.1/mels/LJ033-0187.pt|However, the complete identity of characteristics between the paper and tape in the bag found on the sixth floor
LJSpeech-1.1/mels/LJ009-0284.pt|It was stated in evidence before the Commission on Capital Punishment in eighteen sixty-four,
LJSpeech-1.1/mels/LJ009-0249.pt|When Charles White was executed in eighteen twenty-three for arson, he arranged a handkerchief
LJSpeech-1.1/mels/LJ015-0149.pt|peas at ten shillings a quart, five-guinea pines, and early asparagus were to be found on his table.
LJSpeech-1.1/mels/LJ019-0330.pt|Dietaries were drawn up for adoption on the recommendation of a committee of experts.
LJSpeech-1.1/mels/LJ012-0118.pt|It was a large gold brooch set in pearls, but a portion of the mounting had melted with the heat.
LJSpeech-1.1/mels/LJ008-0071.pt|In the few years which elapsed between the establishment of the gallows at Newgate
LJSpeech-1.1/mels/LJ015-0253.pt|he handed over to Pierce a sum of three thousand pounds, his own, whether rightly or wrongly acquired never came out,
LJSpeech-1.1/mels/LJ045-0102.pt|things apparently went quite smoothly from the time Oswald returned from Mexico until the weekend of November sixteen to seventeen, nineteen sixty-three.
LJSpeech-1.1/mels/LJ009-0256.pt|Still he resisted.
LJSpeech-1.1/mels/LJ050-0055.pt|that the PRS files can no longer be limited largely to persons communicating actual threats to the President.
LJSpeech-1.1/mels/LJ034-0037.pt|Someone sitting on the box facing the window would have his palm in this position if he placed his hand alongside his right hip.
LJSpeech-1.1/mels/LJ020-0081.pt|and knead for ten minutes, carefully at first, lest the liquids should be wasted, and more boldly when they are absorbed by the paste.
LJSpeech-1.1/mels/LJ009-0077.pt|The ordinary of Newgate is an orthodox, unaffected, Church of England divine,
LJSpeech-1.1/mels/LJ008-0107.pt|in his canonicals, and with his head as stiffly erect as a sheriff's coachman.
LJSpeech-1.1/mels/LJ043-0013.pt|Part of the problem resulted from the fact that, as Jeanne De Mohrenschildt testified,
LJSpeech-1.1/mels/LJ037-0225.pt|five foot eight inches, black hair, slender, wearing a white jacket, white shirt and dark slacks, end quote,
LJSpeech-1.1/mels/LJ012-0294.pt|without hesitation brought in a verdict of willful murder.
LJSpeech-1.1/mels/LJ042-0192.pt|are preferred rather than loud and useless manifestations of protest, end quote, Oswald went on to note, quote,
LJSpeech-1.1/mels/LJ016-0078.pt|but had to come down again covered with soot and filth just as the officers entered the ward.
LJSpeech-1.1/mels/LJ028-0174.pt|Other ancient descriptions of the walls have been left us by Ctesias of the fifth century B.C., and by Strabo of the beginning of the Christian era,
LJSpeech-1.1/mels/LJ019-0002.pt|The time at length approached when a radical and complete change was to come over the old city jail.
LJSpeech-1.1/mels/LJ032-0271.pt|(two) Oswald's palmprint was on the rifle in a position which shows that he had handled it while it was disassembled,
LJSpeech-1.1/mels/LJ018-0325.pt|But extra precautions and close supervision have so far proved effectual, and the prisoners are still in custody after a lapse of ten years.
LJSpeech-1.1/mels/LJ048-0259.pt|However, Chief Rowley did not condone the action of the off-duty agents, particularly since it violated a regulation of the Secret Service,
LJSpeech-1.1/mels/LJ009-0099.pt|Meanwhile the clergyman, still bent into the form of a sleeping dog,
LJSpeech-1.1/mels/LJ034-0180.pt|The man was dressed in a light-colored, open-neck shirt which could have been either a sports shirt or a T-shirt,
LJSpeech-1.1/mels/LJ024-0057.pt|Why then should we leave the fulfillment of this public policy to chance
LJSpeech-1.1/mels/LJ018-0260.pt|Mr. Justice Byles, in passing sentence, commented severely upon the commission of such crimes by a man in Roupell's position in life,
LJSpeech-1.1/mels/LJ007-0095.pt|Prisoners indeed were known to boast that they had saved their necks by feigning insanity.
LJSpeech-1.1/mels/LJ005-0117.pt|Numbers of the jails were still unprovided with chaplains, and the prisoners never heard Divine service.
LJSpeech-1.1/mels/LJ006-0168.pt|to taking the descriptions of newly-arrived prisoners.
LJSpeech-1.1/mels/LJ011-0117.pt|devoted its efforts first to a mitigation of the forgery statute, but could not immediately accomplish much.
LJSpeech-1.1/mels/LJ007-0223.pt|The prison officials appear to be on the side of the inspectors, to the great dissatisfaction of the corporation, who claimed the full allegiance and support of its servants.
LJSpeech-1.1/mels/LJ009-0176.pt|Seven other crimes, however, were still capital by law, and so continued till the passing of the Criminal Consolidation Acts of eighteen sixty-one.
LJSpeech-1.1/mels/LJ034-0119.pt|Approximately seven or eight minutes later
LJSpeech-1.1/mels/LJ014-0226.pt|Only a few have vied with Cannon in fiendish cruelty and brutality.
LJSpeech-1.1/mels/LJ045-0074.pt|In the letter Marina Oswald stated that her husband had changed a great deal and that she was very lonely in the United States.
LJSpeech-1.1/mels/LJ012-0044.pt|When his trade was busiest he set up a second establishment, at the head of which, although he was married,
LJSpeech-1.1/mels/LJ027-0012.pt|All have the same ultimate substance
LJSpeech-1.1/mels/LJ028-0254.pt|The people, enjoying the greater freedom which Cyrus permitted them, were contented, and life in Babylon went on about as before.
LJSpeech-1.1/mels/LJ002-0326.pt|The poor debtors were not supplied with beds. Those who could pay the price might hire them from each other,
LJSpeech-1.1/mels/LJ014-0259.pt|Watts led two lives.
LJSpeech-1.1/mels/LJ035-0067.pt|from the sixth floor by the time Baker and Truly arrived, Commission counsel asked Baker and Truly to repeat their movements from the time of the shot
LJSpeech-1.1/mels/LJ010-0146.pt|Attacks upon the sovereign, as I have said, became more common after the accession of the young Queen Victoria in eighteen thirty-eight.
LJSpeech-1.1/mels/LJ007-0084.pt|The inspectors in the following year, on examining the facts, found that some of these poor creatures had been in confinement for long periods:
LJSpeech-1.1/mels/LJ049-0204.pt|While in accordance with its mandate
LJSpeech-1.1/mels/LJ011-0035.pt|Every endeavor was used, however, to obtain a commutation of sentence. His case was twice argued before the judges on points of law,
LJSpeech-1.1/mels/LJ021-0001.pt|The Fireside Chats of Franklin Delano Roosevelt, by Franklin D Roosevelt, Section six.
LJSpeech-1.1/mels/LJ008-0148.pt|One night he was missing
LJSpeech-1.1/mels/LJ011-0237.pt|The jewelers were always a favorite prey of the London thieves.
LJSpeech-1.1/mels/LJ017-0272.pt|"Ah!" he remarked, "they will have to wait for us then till eight."
LJSpeech-1.1/mels/LJ049-0067.pt|the radio net in use in motorcades is elaborate and permits a number of different means of communication with various local points.
LJSpeech-1.1/mels/LJ032-0171.pt|and that this was the same shirt which Oswald wore on the morning of the assassination.
LJSpeech-1.1/mels/LJ048-0132.pt|which would bring to bear the judgment and experience of members of the White House detail other than the advance agent.
LJSpeech-1.1/mels/LJ006-0025.pt|France had sent Misseurs Beaumont and De Tocqueville, who subsequently published several interesting works on the subject.
LJSpeech-1.1/mels/LJ043-0176.pt|If the attack had succeeded and Oswald had been caught, the pictures showing him with his rifle
LJSpeech-1.1/mels/LJ044-0191.pt|Now there appeared to be no chance to get to Cuba, where he had thought he might find his communist ideal. The U.S. Government would not permit travel there
LJSpeech-1.1/mels/LJ038-0011.pt|A police car made a U-turn, and as the sirens grew fainter,
LJSpeech-1.1/mels/LJ002-0244.pt|but its business was much reduced by the extension of the Courts of Conscience.
LJSpeech-1.1/mels/LJ031-0209.pt|X-rays and photographs were taken preliminarily and the pathological examination began at about eight p.m.
LJSpeech-1.1/mels/LJ042-0032.pt|and of his initial commitment to that country can best be understood, however, in the context
LJSpeech-1.1/mels/LJ009-0132.pt|Although this misapplication of religious services still went on,
LJSpeech-1.1/mels/LJ034-0048.pt|The freshness of prints developed in this manner cannot be estimated,
LJSpeech-1.1/mels/LJ043-0023.pt|and helped to move the personal effects of Marina Oswald and the baby.
LJSpeech-1.1/mels/LJ015-0216.pt|This was an important step, and they might easily be robbed some day when Burgess was the guard, provided only that they could be opened.
LJSpeech-1.1/mels/LJ006-0180.pt|the interior of the jail was more like a bear-garden or the noisy purlieus of a public-house than a prison.
LJSpeech-1.1/mels/LJ016-0342.pt|The first private execution under the new law took place within the precincts of Maidstone Jail.
LJSpeech-1.1/mels/LJ025-0170.pt|for it is only the green parts of the plant which, under the influence of sunlight, have the marvelous power of decomposing carbonic acid,
LJSpeech-1.1/mels/LJ047-0076.pt|In New Orleans. In the middle of May of nineteen sixty-three, Agent Hosty checked Oswald's last known residence and found that he had moved.
LJSpeech-1.1/mels/LJ005-0011.pt|were first made use of about eighteen twenty-seven. That the need for prison reform was imperative may be gathered from the few out of many instances I have adduced,
LJSpeech-1.1/mels/LJ033-0142.pt|because the cartons stacked around the southeast corner would shield him.
LJSpeech-1.1/mels/LJ018-0005.pt|the public mind was greatly agitated by the affair for several months. The story of the murder must be pretty familiar to most of my readers.
LJSpeech-1.1/mels/LJ049-0183.pt|regarding such threats and that its Protective Research Section is not adequately staffed or equipped
LJSpeech-1.1/mels/LJ036-0031.pt|and requested a transfer which she might use if she got through the traffic.
LJSpeech-1.1/mels/LJ011-0285.pt|The door of his place of durance stood open, and Mr. Gee began to consider whether he might not escape.
LJSpeech-1.1/mels/LJ041-0114.pt|three months prior to his regularly scheduled separation date, ostensibly to care for his mother who had been injured in an accident at her work.
LJSpeech-1.1/mels/LJ012-0134.pt|Presently the proper person arrived from the consignees, but found the gold-dust gone.
LJSpeech-1.1/mels/LJ011-0005.pt|A lady in the country, who had thirteen thousand pounds in the stocks, desired her London agent to sell them out.
LJSpeech-1.1/mels/LJ028-0087.pt|Such was the appearance of the builder of the walls of Babylon.
LJSpeech-1.1/mels/LJ016-0329.pt|a bill was introduced by Mr. Hibbert, M.P., and accepted by the Government, providing for the future carrying out of executions within prisons.
LJSpeech-1.1/mels/LJ034-0017.pt|could look southwesterly down Elm Street over the top of the "Rolling Readers" cartons.
LJSpeech-1.1/mels/LJ044-0086.pt|executive director of the Information Council of the Americas, who also appeared on the program.
LJSpeech-1.1/mels/LJ038-0100.pt|On November twenty-three, Fritz confronted Oswald with the evidence that he had purchased a rifle under the fictitious name of "Hidell."
LJSpeech-1.1/mels/LJ049-0019.pt|The last Presidential vehicle with any protection against small-arms fire left the White House in nineteen fifty-three.
LJSpeech-1.1/mels/LJ021-0125.pt|it was natural that the workers should seek and obtain a statutory declaration of their constitutional right
LJSpeech-1.1/mels/LJ019-0294.pt|The prison buildings were in many places out of repair; other houses often overlooked them.
LJSpeech-1.1/mels/LJ009-0211.pt|and on the right the ripping chisel, with which the murders had been committed, were exposed to view.
LJSpeech-1.1/mels/LJ044-0172.pt|and left for Irving with Marina Oswald and June and most of the Oswalds' effects three days later.
LJSpeech-1.1/mels/LJ047-0129.pt|FBI informants in the New Orleans area, familiar with pro-Castro or Communist Party activity there,
LJSpeech-1.1/mels/LJ024-0139.pt|has been tipped out of balance by the courts in direct contradiction of the high purposes of the framers of the Constitution.
LJSpeech-1.1/mels/LJ005-0106.pt|Jails, of which the old prison at Reading was a specimen, were still left intact.
LJSpeech-1.1/mels/LJ042-0247.pt|In August of nineteen sixty-three, he gave the New Orleans police as a reason for refusing to permit his family to learn English,
LJSpeech-1.1/mels/LJ047-0092.pt|On August nine, nineteen sixty-three,
LJSpeech-1.1/mels/LJ026-0166.pt|back to starch usable as food and the comparison of the green plant and the animal would be complete.
LJSpeech-1.1/mels/LJ033-0019.pt|According to the testimony of Frazier, Marina Oswald, and Ruth Paine, it appears that Oswald never returned to Irving in midweek
LJSpeech-1.1/mels/LJ042-0172.pt|must have as its nucleus the traditional ideological best of both systems, and yet be utterly opposed to both systems.
LJSpeech-1.1/mels/LJ027-0018.pt|All are forced to make concession after concession to their surroundings, and in these concessions all progress in life consists.
LJSpeech-1.1/mels/LJ041-0187.pt|and he wanted to be on the winning side so that ten thousand years from-now people would look in the history books and say, "Well, this man was ahead of his time."
LJSpeech-1.1/mels/LJ048-0286.pt|Nor is this goal served when agents remain out until early morning hours, and lose the opportunity to get a reasonable amount of sleep.
LJSpeech-1.1/mels/LJ018-0037.pt|In searching the prisoner's box, Mr. Briggs' watch was found wrapped up in a piece of leather,
LJSpeech-1.1/mels/LJ009-0044.pt|His features have no felonious cast;
LJSpeech-1.1/mels/LJ045-0100.pt|She thought that he might not have become involved in the assassination if people had been kinder to him.
LJSpeech-1.1/mels/LJ035-0149.pt|She ran inside and up the front stairs into the large open office reserved for clerical employees.
LJSpeech-1.1/mels/LJ028-0188.pt|In five thirty-eight the city fell, and for a time it became the home of the Persian King.
LJSpeech-1.1/mels/LJ003-0320.pt|which recommended restrictions upon the number of visitors admitted.
LJSpeech-1.1/mels/LJ013-0241.pt|The policeman insisted on searching the premises, at which Good displayed some uneasiness.
LJSpeech-1.1/mels/LJ018-0194.pt|Cummings was repeatedly "run in" for the offense of coining and uttering bad money, whether coin or notes.
LJSpeech-1.1/mels/LJ046-0135.pt|PRS received items in eight thousand, seven hundred nine cases.
LJSpeech-1.1/mels/LJ046-0143.pt|These instructions to PRS personnel appear to be the only instance where an effort was made to reduce the criteria to writing.
LJSpeech-1.1/mels/LJ048-0103.pt|and with the concurrence of the Dallas police, was entirely appropriate, in view of the known desires of the President.
LJSpeech-1.1/mels/LJ038-0279.pt|I think is going overboard in the other direction.
LJSpeech-1.1/mels/LJ044-0117.pt|that there were people who understood his activity, end quote.
LJSpeech-1.1/mels/LJ028-0485.pt|The outer and inner defenses of Babylon were so strong and so high that no enemy could hope to take them,
LJSpeech-1.1/mels/LJ031-0174.pt|After the President was pronounced dead,
LJSpeech-1.1/mels/LJ026-0020.pt|If chlorophyll is present, the carbon dioxide of the air serves as a source of carbon,
LJSpeech-1.1/mels/LJ027-0136.pt|Illustrations quoted from the works of Romanes and Le Conte will make this principle clear.
LJSpeech-1.1/mels/LJ002-0113.pt|in an age when insolvent acts and bankruptcy courts do so much to relieve the impecunious,
LJSpeech-1.1/mels/LJ004-0113.pt|It was further ordered that male prisoners should be kept perfectly distinct from the females.
LJSpeech-1.1/mels/LJ044-0115.pt|he felt that this was a great man that he had received the letter from, end quote.
LJSpeech-1.1/mels/LJ039-0012.pt|The Commission first learned of this incident when Robert Oswald related it to FBI agents on February nineteen, nineteen sixty-four,
LJSpeech-1.1/mels/LJ014-0164.pt|as the wickedness and levity of the immense crowd collected at the execution this morning could be imagined by no man,
LJSpeech-1.1/mels/LJ050-0018.pt|and to keep the Secretary fully informed regarding all significant developments relating to Presidential protection.
LJSpeech-1.1/mels/LJ012-0131.pt|The letter informed him of the marks and sizes of the cases containing the precious metal,
LJSpeech-1.1/mels/LJ016-0308.pt|yet the witnesses were not unanimous.
LJSpeech-1.1/mels/LJ028-0332.pt|Once more, however, he waited till the interval appointed had gone by, and then leading the troops to the place where the four thousand were,
LJSpeech-1.1/mels/LJ006-0251.pt|but the presence and authority of the governor himself became indispensable.
LJSpeech-1.1/mels/LJ006-0016.pt|These considerations no doubt had weight
LJSpeech-1.1/mels/LJ031-0093.pt|Answer: No, sir. Before -- well, in trying to treat an acutely injured patient, you have to establish an airway, adequate ventilation
LJSpeech-1.1/mels/LJ042-0163.pt|After, however, two years and a lot of growing up, I decided to return to the USA.
LJSpeech-1.1/mels/LJ031-0220.pt|During the autopsy examination, Federal agents brought the surgeons three pieces of bone recovered from Elm Street and the Presidential automobile.
LJSpeech-1.1/mels/LJ030-0050.pt|The Presidential limousine.
LJSpeech-1.1/mels/LJ012-0010.pt|both having been recognized by the clergyman who had performed the ceremony, and the assault had been committed to secure the money
LJSpeech-1.1/mels/LJ004-0213.pt|Compared with those highly meritorious institutions Newgate still showed but badly.
LJSpeech-1.1/mels/LJ010-0061.pt|That some thirty or more needy men should hope to revolutionize England is a sufficient proof of the absurdity of their attempt.
LJSpeech-1.1/mels/LJ022-0195.pt|But it is more than the recovery of the material basis of our individual lives.
LJSpeech-1.1/mels/LJ039-0102.pt|After familiarization with live ammunition in the twenty-two rifle and the twenty-two pistol,
LJSpeech-1.1/mels/LJ020-0073.pt|Sift the flour, salt and sugar into a bowl,
LJSpeech-1.1/mels/LJ040-0038.pt|Such ideas of grandeur were apparently accompanied by notions of oppression.
LJSpeech-1.1/mels/LJ019-0049.pt|the principles of which were debated by disputants of widely opposite opinions with an earnestness that sometimes bordered upon acrimony.
LJSpeech-1.1/mels/LJ050-0012.pt|through an Assistant Secretary whose duties also include the direct supervision of the Bureau of the Mint
LJSpeech-1.1/mels/LJ007-0117.pt|where the upper ward was exclusively appropriated to their use. They also had their meals sent in, and, with the food, wine almost ad libitum.
LJSpeech-1.1/mels/LJ004-0169.pt|On the dirty bedstead lay a wretched being in the throes of severe illness.
LJSpeech-1.1/mels/LJ019-0127.pt|or the still more costly process of walling in the whole farm, would have greatly added to the charges of these establishments.
LJSpeech-1.1/mels/LJ014-0141.pt|and stretching out her hand, she gathered up a quantity of the rue which, following ancient custom dating from the days of the jail fever,
LJSpeech-1.1/mels/LJ037-0041.pt|The man appeared to step back as the policeman, quote, calmly opened the car door, end quote, and very slowly got out and walked toward the front of the car.
LJSpeech-1.1/mels/LJ012-0023.pt|He was taken up when still in his teens for stealing a pocketbook, and was sentenced to transportation, but did not get beyond the hulks at Chatham.
LJSpeech-1.1/mels/LJ032-0115.pt|A few minutes after the rifle was discovered on the sixth floor of the Depository Building
LJSpeech-1.1/mels/LJ047-0007.pt|It had interviewed him twice shortly after his return to the United States, again a year later at his request
LJSpeech-1.1/mels/LJ006-0049.pt|an occasional prisoner or two committed by the Houses of Parliament, the Courts of King's Bench, Common Pleas,
LJSpeech-1.1/mels/LJ028-0065.pt|Eleven years later, in five eighty-six, he destroyed the sacred Hebrew city,
LJSpeech-1.1/mels/LJ049-0076.pt|The Commission's review of the provisions for Presidential protection at the time of President Kennedy's trip to Dallas demonstrates the need for substantial improvements.
LJSpeech-1.1/mels/LJ003-0091.pt|Constantly associated with these convicted felons were numbers of juveniles, infants of tender years.
LJSpeech-1.1/mels/LJ050-0030.pt|The Commission also recommends
LJSpeech-1.1/mels/LJ013-0122.pt|Stealing plate was about this period the crime of a more aristocratic thief.
LJSpeech-1.1/mels/LJ046-0013.pt|Prompted by these dismaying statistics, the Commission has inquired into the problems and methods of Presidential protection in effect
LJSpeech-1.1/mels/LJ035-0134.pt|that they were watching the parade from the top step of the building entrance when Gloria Calverly, who works in the Depository Building,
LJSpeech-1.1/mels/LJ016-0232.pt|and he owned a pet pony which would follow him about like a dog.
LJSpeech-1.1/mels/LJ020-0023.pt|If too stiff, warm water, a spoonful at a time until you can handle the paste easily. The danger is in getting it too stiff. Now.
LJSpeech-1.1/mels/LJ005-0046.pt|The good it tried to do took active shape in the establishment of temporary refuges -- at Hoxton for males, and in the Hackney Road for females
LJSpeech-1.1/mels/LJ010-0019.pt|As time passed,
LJSpeech-1.1/mels/LJ049-0130.pt|The Secret Service must rely in large part
LJSpeech-1.1/mels/LJ024-0023.pt|ever since a similar proposal passed the House of Representatives in eighteen sixty-nine.
LJSpeech-1.1/mels/LJ018-0315.pt|to whom it was said one hundred pounds apiece had been given down as the price of their infidelity.
LJSpeech-1.1/mels/LJ029-0037.pt|Advance preparations for President Kennedy's visit to Dallas were primarily the responsibility of two Secret Service agents:
LJSpeech-1.1/mels/LJ049-0218.pt|between the Secret Service and the President and his family is contemplated.
LJSpeech-1.1/mels/LJ003-0155.pt|Tailoring and shoemaking was permitted, but it was deemed unsafe to allow a carpenter or blacksmith to have his tools.
LJSpeech-1.1/mels/LJ013-0113.pt|Robberies as daring in conception as they were boldly executed were common enough.
LJSpeech-1.1/mels/LJ045-0047.pt|and I told him that
LJSpeech-1.1/mels/LJ006-0065.pt|were associated together, "of every variety of age, habit, and delinquency, without employment, oversight, or control."
LJSpeech-1.1/mels/LJ003-0316.pt|It should be peremptorily forbidden to the keeper or any officer to make a pecuniary profit out of the supplies of food, fuel, or other necessaries.
LJSpeech-1.1/mels/LJ021-0004.pt|Tonight I continue that report, though, because of the shortness of time, I must defer a number of subjects to a later date.
LJSpeech-1.1/mels/LJ031-0022.pt|Charles R. Baxter, Robert N. McClelland, Ronald C. Jones; the chief neurologist, Dr. William Kemp Clark;
LJSpeech-1.1/mels/LJ007-0030.pt|consisted of two dozen rooms and fifteen cells. In these various chambers, until just before the inspectors made their report,
LJSpeech-1.1/mels/LJ021-0137.pt|Step by step we have created all the government agencies necessary to insure, as a general rule, industrial peace,
LJSpeech-1.1/mels/LJ033-0081.pt|she looked out the breakfast-room window and saw Oswald cross the street and walk toward the driveway where her brother parked his car near the carport.
LJSpeech-1.1/mels/LJ003-0218.pt|The chapel was filled with a curious but callous congregation, who came to stare at the miserable people thus publicly exposed.
LJSpeech-1.1/mels/LJ028-0317.pt|Introduced into their assembly, he began to bewail his misfortunes, telling them that
LJSpeech-1.1/mels/LJ047-0014.pt|the Office of Naval Intelligence, the FBI and the CIA. The information known to the FBI is summarized below.
LJSpeech-1.1/mels/LJ002-0067.pt|but really kept for the few who had funds sufficient to gain them admission to these more comfortable quarters.
LJSpeech-1.1/mels/LJ003-0101.pt|must have had a tendency to turn them into the world hardened and accomplished in the ways of vice and crime. End quote.
LJSpeech-1.1/mels/LJ036-0048.pt|She boarded the Marsalis bus at St. Paul and Elm Streets to return home. She testified further, quote,
LJSpeech-1.1/mels/LJ022-0129.pt|in making this the most efficient and the cleanest example of public enterprise the world has ever seen.
LJSpeech-1.1/mels/LJ038-0121.pt|or to answer any questions concerning the card.
LJSpeech-1.1/mels/LJ031-0095.pt|Before this was accomplished the President's cardiac activity had ceased and closed cardiac massage was instituted, which made it impossible to inspect his back.
LJSpeech-1.1/mels/LJ007-0131.pt|Enough has probably been extracted from this most damnatory report to give a complete picture of the disgraceful state in which Newgate still remained in eighteen thirty-five.
LJSpeech-1.1/mels/LJ001-0067.pt|In the Low Countries and Cologne, which were very fertile of printed books, Gothic was the favorite.
LJSpeech-1.1/mels/LJ011-0061.pt|Let this monster give his name; I am ready to fight him. I am still determined to put myself in the place of Mr. Fauntleroy.
LJSpeech-1.1/mels/LJ019-0381.pt|in another there was half-heartedness, even apathy and an almost complete contempt for the provisions of the act.
LJSpeech-1.1/mels/LJ012-0170.pt|According to his statement, when sentenced to death, he had been driven to horse-stealing by the execration which had pursued him after the murder.
LJSpeech-1.1/mels/LJ005-0090.pt|the first by daily services, the latter by the appointment of schoolmasters and instruction in reading and writing.
LJSpeech-1.1/mels/LJ049-0127.pt|agencies other than the Secret Service have become involved in phases of the overall problem of protecting our national leaders.
LJSpeech-1.1/mels/LJ004-0100.pt|An infirmary, consisting of two distinct rooms, one for males and one for females, should be provided for the separate accommodation of the sick.
LJSpeech-1.1/mels/LJ003-0148.pt|and spent in providing coals, candles, plates, knives, and forks; while all the occupants of this part of the prison
LJSpeech-1.1/mels/LJ005-0073.pt|To its efforts, and their effect upon Parliament and the public mind, we must attribute the new Jail Acts of four George the fourth
LJSpeech-1.1/mels/LJ003-0166.pt|association at one time forbidden by custom, but which greed and rapacity long made the rule.
LJSpeech-1.1/mels/LJ028-0076.pt|However, several decades ago, an Oriental appeared at the Berlin Museum,
LJSpeech-1.1/mels/LJ012-0253.pt|A further discovery was made in an osier bed near Cold Harbor Lane, Camberwell,
LJSpeech-1.1/mels/LJ024-0053.pt|Fundamentally, if in the future, America cannot trust the Congress it elects to refrain from abuse of our Constitutional usages
LJSpeech-1.1/mels/LJ032-0069.pt|The person having access to the box then takes the notice to the window and is given the package.
LJSpeech-1.1/mels/LJ037-0082.pt|On the evening of November twenty-two,
LJSpeech-1.1/mels/LJ040-0085.pt|John Pic, however, did not think her position was worse than that of many other people.
LJSpeech-1.1/mels/LJ028-0099.pt|the first-born son of Nabopolassar, King of Babylon, am I.
LJSpeech-1.1/mels/LJ004-0170.pt|The only ventilation of this pit, this "dark, cheerless, damp, unwholesome cavern -- a dungeon in its worst sense"
LJSpeech-1.1/mels/LJ022-0110.pt|The key men for the major responsibilities of this great task already have been selected.
LJSpeech-1.1/mels/LJ024-0116.pt|When the time comes for action,
LJSpeech-1.1/mels/LJ040-0161.pt|Dr. Hartogs recommended that Oswald be placed on probation on condition that he seek help and guidance through a child guidance clinic.
LJSpeech-1.1/mels/LJ032-0266.pt|Paul M. Stombaugh, of the FBI Laboratory,
LJSpeech-1.1/mels/LJ006-0086.pt|his place is assigned among the most depraved, the most experienced, and the most incorrigible offenders in the middle yard.
LJSpeech-1.1/mels/LJ038-0228.pt|and into downtown Dallas through the Triple Underpass.
LJSpeech-1.1/mels/LJ028-0319.pt|"And now," he went on to say, "my coming to you, Babylonians,
LJSpeech-1.1/mels/LJ023-0054.pt|I hope that you have re-read the Constitution of the United States in these past few weeks.
LJSpeech-1.1/mels/LJ028-0108.pt|Fortunately in several of his long inscriptions, recently discovered in the Babylonian mounds, Nebuchadnezzar speaks of the building of the walls.
LJSpeech-1.1/mels/LJ042-0134.pt|The psychological effects of that change must have been highly unsettling. It should be remembered
LJSpeech-1.1/mels/LJ032-0083.pt|Experts on questioned documents from the Treasury Department and the FBI testified that the Hidell cards were counterfeit photographic reproductions
LJSpeech-1.1/mels/LJ036-0216.pt|Tippit got out and started to walk around the front of the car
LJSpeech-1.1/mels/LJ002-0281.pt|The demands for fees were excessive in Giltspur Street.
LJSpeech-1.1/mels/LJ034-0169.pt|the same corner where Brennan was sitting on a concrete wall.
LJSpeech-1.1/mels/LJ009-0004.pt|would be astonished to observe the peculiar tenderness, I was going to add respect,
LJSpeech-1.1/mels/LJ004-0094.pt|This act set forth that "whereas the malignant fever commonly called the jail distemper
LJSpeech-1.1/mels/LJ034-0122.pt|As will be discussed fully below, the Commission has concluded that this suspect was Lee Harvey Oswald.
LJSpeech-1.1/mels/LJ033-0179.pt|since the original bag had been discolored during various laboratory examinations and could not be used for valid identification by witnesses.
LJSpeech-1.1/mels/LJ022-0094.pt|in whose field the project falls, and also to notify another agency which I am creating -- a Progress Division.
LJSpeech-1.1/mels/LJ003-0334.pt|It made this too the excuse for begging the most important issue of the whole question.
LJSpeech-1.1/mels/LJ004-0034.pt|Moreover, the laws applied more particularly to county jurisdictions.
LJSpeech-1.1/mels/LJ048-0254.pt|advised, in the course of the Secret Service investigation of these events, that each agent reported for duty on time,
LJSpeech-1.1/mels/LJ025-0038.pt|carbon, hydrogen and oxygen.
LJSpeech-1.1/mels/LJ036-0217.pt|As Tippit reached the left front wheel the man pulled out a revolver and fired several shots.
LJSpeech-1.1/mels/LJ043-0100.pt|It is not possible to tell whether Oswald did this to provide an excuse for his eventual discharge,
LJSpeech-1.1/mels/LJ005-0222.pt|and as the window-frames would not shut tight, the prisoners complained much of the cold, especially at night.
LJSpeech-1.1/mels/LJ032-0040.pt|were written the words "A. Hidell, P.O. Box two nine one five Dallas, Texas."
LJSpeech-1.1/mels/LJ015-0011.pt|Maltby, who had bolted, was pursued and arrested, to end his life miserably by committing suicide in a Newgate cell.
LJSpeech-1.1/mels/LJ032-0153.pt|A palmprint could not be placed on this portion of the rifle, when assembled, because the wooden foregrip covers the barrel at this point.
LJSpeech-1.1/mels/LJ029-0092.pt|On November eight, when Lawson was briefed on the itinerary for the trip to Dallas,
LJSpeech-1.1/mels/LJ004-0132.pt|the old evils of indiscriminate association still continued unchecked.
LJSpeech-1.1/mels/LJ039-0067.pt|which is ordinarily required when a marksman must raise his rifle as a target moves farther away.
LJSpeech-1.1/mels/LJ044-0235.pt|If there was no conspiracy which would help him escape, the possibility of which has been considered in chapter six,
LJSpeech-1.1/mels/LJ028-0144.pt|fifty royal cubits in width, and two hundred in height.
LJSpeech-1.1/mels/LJ029-0102.pt|After the selection of the Trade Mart as the luncheon site,
LJSpeech-1.1/mels/LJ009-0116.pt|On the following day the capital convicts, whose companions have been hanged, are required to return thanks for their narrow escape.
LJSpeech-1.1/mels/LJ040-0228.pt|Despite his withdrawal, he gives the impression that he is not so difficult to reach as he appears and patient, prolonged effort
LJSpeech-1.1/mels/LJ022-0020.pt|cause of clearer thinking and a better understanding, are considering the whole rather than a mere part relating to one section or to one crop,
LJSpeech-1.1/mels/LJ047-0104.pt|reluctant and actually as far as I was concerned, was completely evasive on them. End quote.
LJSpeech-1.1/mels/LJ031-0127.pt|a protective circle of Secret Service agents surrounded Vice President and Mrs. Johnson
LJSpeech-1.1/mels/LJ043-0021.pt|While the exact sequence of events is not clear because of conflicting testimony,
LJSpeech-1.1/mels/LJ007-0005.pt|The inspectors paid tribute to the excellence of the motives of these philanthropic ladies, and recognized the good they did.
LJSpeech-1.1/mels/LJ018-0307.pt|Through Noyes the rest of the conspirators were eventually apprehended. Very little if any of the ill-gotten proceeds, however, was ever recovered.
LJSpeech-1.1/mels/LJ029-0191.pt|He asserted that Dallas had shed its reputation of the twenties as the, quote, Southwest hate capital of Dixie, end quote
LJSpeech-1.1/mels/LJ009-0088.pt|ignominy, sorrow, sufferings, wretchedness, pangs,
LJSpeech-1.1/mels/LJ021-0192.pt|We are not frightened by reactionary lawyers or political editors.
LJSpeech-1.1/mels/LJ038-0179.pt|(three) firearm identification of the bullet found in Walker's home, and (four)
LJSpeech-1.1/mels/LJ028-0518.pt|It is not strange, then, that they were included among the Seven Wonders of the World,
LJSpeech-1.1/mels/LJ026-0026.pt|As in the liquefaction of gases, there is a "critical point" at which the substance under experiment is neither gaseous nor liquid.
LJSpeech-1.1/mels/LJ031-0090.pt|A thorough inspection would have involved washing and cleansing the back, and this is not practical in treating an acutely injured patient.
LJSpeech-1.1/mels/LJ016-0110.pt|The third, Bell, remained longest at large. He too was run into at a lodging in the Kingsland Road.
LJSpeech-1.1/mels/LJ032-0019.pt|Shortly after the Mannlicher-Carcano rifle was found on the sixth floor of the Texas School Book Depository Building, agents of the FBI
LJSpeech-1.1/mels/LJ044-0146.pt|On June twenty-four, nineteen sixty-three, he applied for a new passport
LJSpeech-1.1/mels/LJ048-0003.pt|Hosty's interpretation of the prevailing FBI instructions on referrals to the Secret Service was defended before the Commission by his superiors.
LJSpeech-1.1/mels/LJ013-0194.pt|but on the second day the discovery of fresh evidence, more particularly the recovery of some of Lord William's stolen plate,
LJSpeech-1.1/mels/LJ038-0224.pt|Another statement which limits the time when it could have been written is the reference, quote, you and the baby, end quote,
LJSpeech-1.1/mels/LJ014-0147.pt|shaking her clenched and manacled hands in the officers' faces.
LJSpeech-1.1/mels/LJ019-0168.pt|Renewed recommendations to provide employment resulted in the provision of a certain amount of oakum for picking,
LJSpeech-1.1/mels/LJ029-0175.pt|Both Dallas papers cited White House sources on September twenty-six as confirming the President's intention to visit Texas on November twenty-one and twenty-two,
LJSpeech-1.1/mels/LJ033-0078.pt|Neither she nor Mrs. Paine saw him leave the house. About half-a-block away from the Paine house was the residence of Mrs. Linnie Mae Randle,
LJSpeech-1.1/mels/LJ040-0235.pt|When Lee became a disciplinary problem upon his return to school in the fall of nineteen fifty-three,
LJSpeech-1.1/mels/LJ003-0322.pt|except for the use of the debtors, or as medical comforts for the infirmary.
LJSpeech-1.1/mels/LJ018-0359.pt|her object being first to dispose of the wife of a man for whom she had conceived a guilty passion,
LJSpeech-1.1/mels/LJ030-0128.pt|From Main Street the motorcade turned right and went north on Houston Street, passing tall buildings on the right,
LJSpeech-1.1/mels/LJ033-0204.pt|So if I found all of these then I would have been able to say these fibers probably had come from this blanket. But since I found so few,
LJSpeech-1.1/mels/LJ013-0042.pt|the foundations of which had been laid by buying old ships on purpose to cast them away.
LJSpeech-1.1/mels/LJ041-0174.pt|and had not intended any criticism of Oswald's political views which is the way in which, Thornley thought, Oswald took his remarks.
LJSpeech-1.1/mels/LJ030-0245.pt|I was pushed down by Agent Youngblood.
LJSpeech-1.1/mels/LJ031-0103.pt|While Dr. Carrico went on to attend the President, Dr. Dulany stayed with the Governor and was soon joined by several other doctors.
LJSpeech-1.1/mels/LJ048-0152.pt|At some overpasses all persons were excluded
LJSpeech-1.1/mels/LJ018-0232.pt|He himself prepared it on a blank form which he had brought with him on purpose.
LJSpeech-1.1/mels/LJ050-0200.pt|The Secret Service should utilize the personnel of other Federal law enforcement offices
LJSpeech-1.1/mels/LJ012-0167.pt|But Probert, who turned king's evidence, and materially assisted conviction,
LJSpeech-1.1/mels/LJ006-0225.pt|If any man presumed to turn in too early
LJSpeech-1.1/mels/LJ014-0127.pt|She was smartly dressed in a plaid shawl, a white lace cap;
LJSpeech-1.1/mels/LJ033-0021.pt|after the birth of their second child.
LJSpeech-1.1/mels/LJ036-0080.pt|toward a light-colored Rambler station wagon, which was moving slowly along Elm toward the underpass:
LJSpeech-1.1/mels/LJ008-0083.pt|Two cart-loads of faggots were piled about her, and after she had hung for half-an-hour the fire was kindled.
LJSpeech-1.1/mels/LJ010-0282.pt|Pate was said to be an eccentric person, given to strange acts and antics, such as mixing whiskey and camphor with his morning bath-water,
LJSpeech-1.1/mels/LJ013-0088.pt|the cashier gave them eight Bank of England notes for one thousand pounds each, saying that they could get so much specie nowhere else.
LJSpeech-1.1/mels/LJ028-0279.pt|after which he commanded his servants to tell no one what had come to pass, while he himself pondered the matter.
LJSpeech-1.1/mels/LJ002-0057.pt|These wards were all fitted with barrack-beds, but no bedding was supplied.
LJSpeech-1.1/mels/LJ032-0253.pt|From September twenty-four, nineteen sixty-three, when Marina Oswald arrived in Irving from New Orleans, until the morning of the assassination,
LJSpeech-1.1/mels/LJ043-0135.pt|Oswald shot at Maj. Gen. Edwin A. Walker (Resigned, U.S. Army),
LJSpeech-1.1/mels/LJ025-0115.pt|and from that day to this the rapid improvement of methods of investigation and the energy of a host of accurate observers
LJSpeech-1.1/mels/LJ050-0166.pt|The Commission was struck by the apparent lack of effort, on an interagency basis,
LJSpeech-1.1/mels/LJ038-0026.pt|Other policemen entered the front door and searched the balcony.
LJSpeech-1.1/mels/LJ028-0470.pt|Time has dealt even less kindly with it, for it may be traced only for the distance of about a mile along its eastern side.
LJSpeech-1.1/mels/LJ018-0253.pt|For these crimes William Roupell was tried at the Central Criminal Court on the twenty-fourth September, eighteen sixty-two.
LJSpeech-1.1/mels/LJ019-0147.pt|this occurred in summer at eight, but in the winter months it took place at dusk, and was often as early as four or five.
LJSpeech-1.1/mels/LJ045-0148.pt|After all, when will all your foolishness come to an end? All of these comedies. First one thing and then another. And now this fictitious name, end quote.
LJSpeech-1.1/mels/LJ043-0031.pt|I am surprised that he didn't do something worse, end quote.
LJSpeech-1.1/mels/LJ033-0039.pt|and one which provided an excuse for the carrying of a bulky package the following morning.
LJSpeech-1.1/mels/LJ010-0006.pt|Certain crimes, those against the person especially, diminished gradually. They became less easy or remunerative.
LJSpeech-1.1/mels/LJ049-0005.pt|Rigorous security precautions had been arranged at Love Field with the local law enforcement authorities by Agents Sorrels and Lawson.
LJSpeech-1.1/mels/LJ004-0142.pt|where a lad lay ill with fever, three other prisoners, at first perfectly healthy, were lodged. Of course they were seized with the fever;
LJSpeech-1.1/mels/LJ042-0038.pt|and religion and education are used as a tool to suppress what would otherwise be a population questioning their government's unfair
LJSpeech-1.1/mels/LJ046-0079.pt|The rights of private individuals must not be infringed.
LJSpeech-1.1/mels/LJ026-0123.pt|which could be derived by the ordinary chemical evolution of protoplasm, proteid, sugar, starch or fats.
LJSpeech-1.1/mels/LJ037-0255.pt|testified that Commission Exhibit Number one sixty-two was the jacket worn by the man they saw on November twenty-two.
LJSpeech-1.1/mels/LJ028-0345.pt|He then chose out near three thousand of the leading citizens and caused them to be crucified, while he allowed the remainder still to inhabit the city.
LJSpeech-1.1/mels/LJ045-0076.pt|The letter fell into Oswald's hands when it was returned to his post office box
LJSpeech-1.1/mels/LJ027-0103.pt|Thus, for instance, the unborn whale has rudimentary teeth,
LJSpeech-1.1/mels/LJ011-0076.pt|His offense was uttering forged notes, and there was strong suspicion that he had long subsisted entirely by this fraud.
LJSpeech-1.1/mels/LJ047-0223.pt|I don't recall the exact date. It was about a week prior. End quote.
LJSpeech-1.1/mels/LJ016-0369.pt|upon them devolved the painful duty of cutting down the body and preparing for the inquest.
LJSpeech-1.1/mels/LJ050-0189.pt|that written instructions might come into the hands of local newspapers, to the prejudice of the precautions described.
LJSpeech-1.1/mels/LJ019-0095.pt|which was yet under full control, and might be made to work corn-mills or prove otherwise productive;
LJSpeech-1.1/mels/LJ029-0205.pt|for President Kennedy, stating that "in many respects Dallas County has isolated itself from the main stream of life in the world in this decade.
LJSpeech-1.1/mels/LJ047-0045.pt|and promised to advise the FBI if he heard from them.
LJSpeech-1.1/mels/LJ036-0069.pt|Instead of waiting there, Oswald apparently went as far away as he could and boarded the first Oak Cliff bus which came along
LJSpeech-1.1/mels/LJ014-0180.pt|secure the stock of watches and jewelry, then lock up the place and take on the keys to Mr. Berry's private house in Pimlico.
LJSpeech-1.1/mels/LJ021-0060.pt|Minimum wages have been established and other wages adjusted toward a rising standard of living.
LJSpeech-1.1/mels/LJ002-0128.pt|He also makes the curious calculation that the costs of these actions if undefended
LJSpeech-1.1/mels/LJ028-0437.pt|Here, it has been suggested, were the famous hanging gardens which some ancient authors included among the Seven Wonders of the World.
LJSpeech-1.1/mels/LJ028-0234.pt|Cyrus was now reduced to great perplexity, as time went on and he made no progress against the place.
LJSpeech-1.1/mels/LJ001-0050.pt|and though the famous family of Aldus restored its technical excellence, rejecting battered letters,
LJSpeech-1.1/mels/LJ006-0154.pt|Nothing was more prominently brought out by the inspectors than the inefficiency of the governor at that time, Mr. Cope.
LJSpeech-1.1/mels/LJ022-0148.pt|to enforce minimum wages, to prevent excessive hours,
LJSpeech-1.1/mels/LJ035-0070.pt|Truly stood in front of the building.
LJSpeech-1.1/mels/LJ028-0250.pt|Such, then, were the circumstances of the first taking of Babylon.
LJSpeech-1.1/mels/LJ043-0001.pt|Report of the President's Commission on the Assassination of President Kennedy.
LJSpeech-1.1/mels/LJ004-0171.pt|was by a kind of chimney, which the prisoners kept hermetically sealed, and which had never been opened in the memory of the turnkey.
LJSpeech-1.1/mels/LJ025-0009.pt|for in the past fifty years it has been made evident that in general principles all living things are fundamentally similar.
LJSpeech-1.1/mels/LJ010-0066.pt|which under Thistlewood as dictator was to rule the nation, by first handing over its capital to fire and pillage.
LJSpeech-1.1/mels/LJ022-0139.pt|with which we have been concerned for two years.
LJSpeech-1.1/mels/LJ014-0056.pt|while in Ireland a wife dashed out her husband's brains with a hammer.
LJSpeech-1.1/mels/LJ037-0079.pt|They ran to the door in time to see a man with a revolver cut across their lawn and disappear around a corner of the house onto Patton.
LJSpeech-1.1/mels/LJ032-0044.pt|shows an imprint made by the cash register which recorded the receipt of twenty-one dollars, forty-five cents on March thirteen, nineteen sixty-three.
LJSpeech-1.1/mels/LJ036-0116.pt|Lee Oswald was Number three;
LJSpeech-1.1/mels/LJ028-0476.pt|The entire width of this inner defense was about fifty-five feet; its height is uncertain.
LJSpeech-1.1/mels/LJ004-0137.pt|and that it was accomplished by "sleeping edgewise."
LJSpeech-1.1/mels/LJ003-0113.pt|A prisoner, generally the oldest and most dexterous thief,
LJSpeech-1.1/mels/LJ037-0128.pt|were on the lot at the time, and they saw a white male with a revolver in his hands running south on Patton.
LJSpeech-1.1/mels/LJ031-0137.pt|At approximately one:twenty p.m., Vice President Johnson was notified by O'Donnell that President Kennedy was dead.
LJSpeech-1.1/mels/LJ005-0008.pt|they were followed by a crowd of reckless boys, who jeered at and insulted them.
LJSpeech-1.1/mels/LJ001-0083.pt|The seventeenth century founts were bad rather negatively than positively.
LJSpeech-1.1/mels/LJ006-0224.pt|New arrivals, especially the innocent and still guileless debutant, were tormented with rude horse-play, and assailed by the most insulting "chaff."
LJSpeech-1.1/mels/LJ015-0298.pt|But while Hardwicke was in communication with Saward, the bank was in communication with London
LJSpeech-1.1/mels/LJ017-0212.pt|Her captain was John Smith;
LJSpeech-1.1/mels/LJ049-0096.pt|There have been a number of efforts to make assassination a Federal crime, particularly after the assassination of President McKinley
LJSpeech-1.1/mels/LJ015-0069.pt|but the firm he served got him a situation as clerk in the office of the Great Northern Railway,
LJSpeech-1.1/mels/LJ013-0243.pt|Good now offered to go to Wandsworth and satisfy the pawnbroker.
LJSpeech-1.1/mels/LJ015-0235.pt|and last, but not least, Agar frequently traveled up and down the line to test the false keys he had manufactured with Pierce's assistance.
LJSpeech-1.1/mels/LJ016-0096.pt|They were penal servitude men, their names Bell, Brown, and Barry, and they were awaiting transfer to Leicester,
LJSpeech-1.1/mels/LJ029-0110.pt|The route impressed the agents as a natural and desirable one.
LJSpeech-1.1/mels/LJ011-0098.pt|He soon, however, became deeply involved in Stock Exchange speculations,
LJSpeech-1.1/mels/LJ001-0016.pt|The Middle Ages brought calligraphy to perfection, and it was natural therefore
LJSpeech-1.1/mels/LJ005-0130.pt|There were tread-wheels at most of the prisons, and regular employment thereon or at some other kind of hard labor.
LJSpeech-1.1/mels/LJ018-0091.pt|Wagner and Bateman, who had already been convicted of systematic forgery, and sentenced to transportation, but they had been released on ticket-of-leave
LJSpeech-1.1/mels/LJ019-0053.pt|and our modern practice has prudently tried to steer between the two extremes, accepting as the best system a judicious combination of both.
LJSpeech-1.1/mels/LJ023-0071.pt|For nearly twenty years there was no conflict between the Congress and the Court.
LJSpeech-1.1/mels/LJ019-0390.pt|Since then a strong central authority has labored steadfastly to compass concentration,
LJSpeech-1.1/mels/LJ047-0163.pt|According to Hosty, Mrs. Paine indicated that she thought she could find out where Oswald was living and would let him know.
LJSpeech-1.1/mels/LJ016-0035.pt|the wall beneath and above it was "rusticated," in other words, the granite surface had become roughened, and offered a sort of foothold.
LJSpeech-1.1/mels/LJ015-0211.pt|Each safe had three sets of double keys, all held by confidential servants of the company.
LJSpeech-1.1/mels/LJ043-0148.pt|She testified that she was agitated because she had found the note in Oswald's room,
LJSpeech-1.1/mels/LJ028-0207.pt|On the fourteenth day Sippar was taken without a battle.
LJSpeech-1.1/mels/LJ007-0062.pt|Latterly his ministrations to the condemned had been restricted to a visit on Sunday afternoons, and occasionally about once a fortnight on a week-day.
LJSpeech-1.1/mels/LJ049-0186.pt|the Commission received a number of proposals designed to improve current arrangements for protecting the President.
LJSpeech-1.1/mels/LJ011-0196.pt|Mr. Turner at once set off for London, where he sought the assistance of the police,
LJSpeech-1.1/mels/LJ003-0227.pt|So unjust and unequal was the system, that the allowance to convicted criminals was better than that of the innocent debtor,
LJSpeech-1.1/mels/LJ047-0243.pt|According to Revill, Hosty indicated that he was going to tell this to Lieutenant Wells of the homicide and robbery bureau.
LJSpeech-1.1/mels/LJ007-0116.pt|A few others, who could not afford a payment of more than half a guinea, were permitted to monopolize a part of the prison infirmary,
LJSpeech-1.1/mels/LJ018-0243.pt|the hardship to the holders of these lands being plain, should the allegations of invalidity be made good.
LJSpeech-1.1/mels/LJ007-0080.pt|These powers were not invariably put in force, and there were in consequence many unhappy lunatics in Newgate and other jails,
LJSpeech-1.1/mels/LJ038-0037.pt|Oswald then struck McDonald between the eyes with his left fist; with his right hand he drew a gun from his waist.
LJSpeech-1.1/mels/LJ043-0175.pt|The items which Oswald left at home when he made his attack on Walker suggest a strong concern for his place in history.
LJSpeech-1.1/mels/LJ040-0114.pt|Relations soon became strained, however, so in late September Lee and his mother moved to their own apartment in the Bronx.
LJSpeech-1.1/mels/LJ010-0241.pt|but she declared she would not remain a prisoner in her own palace, and next day drove out as usual in an open barouche.
LJSpeech-1.1/mels/LJ037-0202.pt|identified records of Seaport Traders, Incorporated, which showed that a, quote, point three eight
LJSpeech-1.1/mels/LJ019-0230.pt|In eighteen sixty-one a similar work was undertaken to provide separate cellular accommodation for the female inmates of Newgate,
LJSpeech-1.1/mels/LJ010-0134.pt|He roared out snatches of a song about Death or Liberty, and just before he was turned off,
LJSpeech-1.1/mels/LJ014-0005.pt|but too late to give substantial aid.
LJSpeech-1.1/mels/LJ005-0186.pt|They neither built new jails nor contracted with the counties, as had been expected, for the transfer of their prisoners.
LJSpeech-1.1/mels/LJ017-0003.pt|Nevertheless, in order to give completeness to the picture
LJSpeech-1.1/mels/LJ020-0014.pt|beating the batter smooth as you go on until all of the liquid and flour has gone in.
LJSpeech-1.1/mels/LJ014-0245.pt|It was the custom in this office to make the banker's passbook the basis of the entries in the company's ledgers.
LJSpeech-1.1/mels/LJ008-0180.pt|Among the dead was a sailor lad whom no one knew;
LJSpeech-1.1/mels/LJ019-0022.pt|On the other hand, it must be admitted
LJSpeech-1.1/mels/LJ027-0034.pt|Hence, as Jordan has said, "the inside of an animal tells the real history of its ancestry; the outside tells us only where its ancestors have been."
LJSpeech-1.1/mels/LJ040-0124.pt|This continued despite the efforts of the school authorities and, to a lesser extent, of his mother to have him return to school.
LJSpeech-1.1/mels/LJ006-0192.pt|There was no school for adults; only the boys were taught anything, and their instructor, with his assistant, were convicted prisoners.
LJSpeech-1.1/mels/LJ014-0229.pt|Mobbs systematically ill-used his wife for a long space of time, and at last cut her throat.
LJSpeech-1.1/mels/LJ031-0162.pt|other terminal buildings and the neighboring parking lots, of all people.
LJSpeech-1.1/mels/LJ032-0094.pt|listing Marina Oswald and A. J. Hidell
LJSpeech-1.1/mels/LJ022-0155.pt|Power production in this country is virtually back to the nineteen twenty-nine peak.
LJSpeech-1.1/mels/LJ009-0291.pt|He was always known as a mild-mannered man of simple tastes, much given to angling in the New River, and a devoted rabbit fancier.
LJSpeech-1.1/mels/LJ006-0130.pt|had a key of both the master's side and middle side yards, was the only person present at the distribution of beer, and was trusted to examine,
LJSpeech-1.1/mels/LJ040-0131.pt|Marguerite Oswald visited her son at Youth House, where she recalled that she waited in line, quote,
LJSpeech-1.1/mels/LJ009-0113.pt|whistles merrily, and points upwards with madness in his look.
LJSpeech-1.1/mels/LJ037-0078.pt|when they heard the sound of gunfire and the screams of Helen Markham.
LJSpeech-1.1/mels/LJ006-0093.pt|So closely did they lie together, that the inspectors at their night visits found it difficult in stepping across the room to avoid treading on them.
LJSpeech-1.1/mels/LJ008-0061.pt|The entrance upon this floor or leaf is from the middle window over the gate of the prison;
LJSpeech-1.1/mels/LJ001-0156.pt|The paper on which the printing is to be done is a necessary part of our subject:
LJSpeech-1.1/mels/LJ029-0195.pt|when Governor Connally confirmed on November eight that the President would come to Texas on November twenty-one and twenty-two,
LJSpeech-1.1/mels/LJ040-0080.pt|That situation, however, was short-lived,
LJSpeech-1.1/mels/LJ010-0165.pt|but he came as a lad to London, and took service as a pot-boy to a publican.
LJSpeech-1.1/mels/LJ018-0334.pt|Webster, it may be mentioned here, was one of the worst prisoners ever remembered in Newgate
LJSpeech-1.1/mels/LJ046-0227.pt|According to Special Agent in Charge Bouck,
LJSpeech-1.1/mels/LJ019-0089.pt|sometimes it embraced the tread-wheel or the newly-invented instruments known as cranks, which ground air.
LJSpeech-1.1/mels/LJ034-0005.pt|He worked principally on the first and sixth floors of the building, gathering books listed on orders and delivering them to the shipping room on the first floor.
LJSpeech-1.1/mels/LJ043-0089.pt|to a commercial advertising photography firm in Dallas, where he was employed as a trainee starting October twelve, nineteen sixty-two.
LJSpeech-1.1/mels/LJ016-0247.pt|while round about were shoe-strings, boot-laces, and lasts. Marwood, strange to say, followed the same trade as Calcraft.
LJSpeech-1.1/mels/LJ045-0105.pt|She testified that she told him, quote,
LJSpeech-1.1/mels/LJ020-0027.pt|Half the quantity of sponge given in preceding receipt.
LJSpeech-1.1/mels/LJ028-0211.pt|He appointed Gobrias governor of Babylon.
LJSpeech-1.1/mels/LJ019-0314.pt|The separation of prisoners in cells duly certified by the inspectors was insisted upon,
LJSpeech-1.1/mels/LJ005-0001.pt|The Chronicles of Newgate, Volume two. By Arthur Griffiths. Section eight: The beginnings of prison reform.
LJSpeech-1.1/mels/LJ009-0178.pt|In eighteen thirty-two the dissection of bodies cut down from the gallows, which had been decreed centuries previously, was abolished;
LJSpeech-1.1/mels/LJ034-0062.pt|Although a person could handle a carton and not leave identifiable prints,
LJSpeech-1.1/mels/LJ027-0088.pt|Extensive comparison, on the contrary, shows them to be the same, although the essential identity is obscured by adaptive modifications.
LJSpeech-1.1/mels/LJ032-0240.pt|By Sunday, March thirty-one, nineteen sixty-three,
LJSpeech-1.1/mels/LJ036-0024.pt|on a trip which passed a check point at St. Paul and Elm Streets at twelve:thirty-six p.m., November twenty-two, nineteen sixty-three.
LJSpeech-1.1/mels/LJ039-0201.pt|fired two series of three shots at twenty-five yards in four point six and four point eight seconds.
LJSpeech-1.1/mels/LJ006-0113.pt|The authority of these wardsmen so improperly exalted, and so entirely unchecked, degenerated into a baneful despotism.
LJSpeech-1.1/mels/LJ048-0137.pt|there have been references to the numerous discussions between Secret Service representatives and the Dallas Police Department.
LJSpeech-1.1/mels/LJ007-0014.pt|The admission of a crowd of visitors to assist in these lay services has already been remarked upon; as the inspectors pointed out,
LJSpeech-1.1/mels/LJ007-0057.pt|Turnkeys occasionally visited the press-yard, but its occupants were under little or no control.
LJSpeech-1.1/mels/LJ010-0121.pt|that he was, to use Thistlewood's words, "a contriver, instigator, and entrapper."
LJSpeech-1.1/mels/LJ011-0176.pt|He now pretended that Mr. Turner was also on his way to the border, pursued by sheriffs' officers.
LJSpeech-1.1/mels/LJ036-0189.pt|at the southeast corner of tenth Street and Patton Avenue, moments before the Tippit shooting.
LJSpeech-1.1/mels/LJ006-0068.pt|We have reason to fear that poverty, ragged clothes, and an inability to pay the ward dues, elsewhere exacted for better accommodation,
LJSpeech-1.1/mels/LJ006-0097.pt|Water might not be taken into the ward for washing purposes.
LJSpeech-1.1/mels/LJ048-0085.pt|the Commission believes that the liaison between all Federal agencies responsible for Presidential protection should be improved.
LJSpeech-1.1/mels/LJ039-0160.pt|In tests with the Mannlicher-Carano C twenty-seven sixty-six rifle, over one hundred rounds of this ammunition were fired by the FBI
LJSpeech-1.1/mels/LJ038-0052.pt|testified regarding the arrest of Oswald, as did the various police officers who participated in the fight.
LJSpeech-1.1/mels/LJ010-0063.pt|The massacre of the whole of the Cabinet Ministers at one stroke was to be followed by an attack
LJSpeech-1.1/mels/LJ009-0295.pt|who had been a convicted prisoner at York, but who consented to act as hangman when Calcraft was engaged, and no other functionary could be obtained.
LJSpeech-1.1/mels/LJ011-0250.pt|While thus engaged, Howard thrust the poker into the fire.
LJSpeech-1.1/mels/LJ018-0273.pt|Tarpey was caught through his wife,
LJSpeech-1.1/mels/LJ047-0131.pt|In early September nineteen sixty-three
LJSpeech-1.1/mels/LJ040-0232.pt|Few social agencies even in New York were equipped to provide the kind of intensive treatment that he needed,
LJSpeech-1.1/mels/LJ010-0051.pt|The well-known Cato Street conspiracy,
LJSpeech-1.1/mels/LJ008-0077.pt|where the apparatus for the punishment she was about to experience
LJSpeech-1.1/mels/LJ006-0115.pt|Their original capital had been a few shillings, and for this they purchased the right to tax their fellows to the extent of pounds per week.
LJSpeech-1.1/mels/LJ048-0262.pt|during the hours they are officially employed at their post of duty, or when they may reasonably expect that they may be called upon to perform an official duty.
LJSpeech-1.1/mels/LJ020-0101.pt|From the beginning of your apprenticeship in housewifery, learn how to "dovetail" your duties neatly into one another.
LJSpeech-1.1/mels/LJ045-0207.pt|He could not keep them with him in Dallas, where at least he could see his children whom, several witnesses testified, he seemed to love.
LJSpeech-1.1/mels/LJ021-0009.pt|with a greater certainty of the employment of labor at a reasonable wage and of more business at a fair profit.
LJSpeech-1.1/mels/LJ038-0137.pt|the Commission found that Oswald lied when he told Frazier that he was returning to Irving to obtain curtain rods.
LJSpeech-1.1/mels/LJ041-0164.pt|which Thornley read at Oswald's suggestion.
LJSpeech-1.1/mels/LJ001-0006.pt|And it is worth mention in passing that, as an example of fine typography,
LJSpeech-1.1/mels/LJ003-0131.pt|He was an inmate of the same ward with others of the most dreadful sort, quote,
LJSpeech-1.1/mels/LJ003-0208.pt|a number of amateurs were ever ready to give their gratuitous ministrations to the condemned.
LJSpeech-1.1/mels/LJ010-0172.pt|He saw Prince Albert return there from a visit to Woolwich, and then passed on to Constitution Hill,
LJSpeech-1.1/mels/LJ028-0203.pt|Less picturesque than this Hebrew legend is the royal record of Babylon, which fortunately was inscribed upon a clay cylinder from the ruins of the city.
LJSpeech-1.1/mels/LJ007-0146.pt|vaunting his own adventures, or listening to those of others;
LJSpeech-1.1/mels/LJ021-0087.pt|We have the right to expect that this driving power will be given patriotically and whole-heartedly to our nation.
LJSpeech-1.1/mels/LJ025-0077.pt|Their food is provided for them,
LJSpeech-1.1/mels/LJ028-0185.pt|Perhaps Babylon was so strongly fortified that at first he made no attempt to add it to his empire,
LJSpeech-1.1/mels/LJ030-0207.pt|with the follow-up car trailing the President's automobile by approximately five feet.
LJSpeech-1.1/mels/LJ012-0109.pt|But they at once made tracks, and took up their residence under assumed names in a tavern in Bloomsbury.
LJSpeech-1.1/mels/LJ032-0230.pt|that the published pictures were the same as the original except for retouching done by these publications, apparently for the purpose of clarifying the lines of the rifle
LJSpeech-1.1/mels/LJ049-0095.pt|for all offenses within its jurisdiction, as are FBI agents and Federal marshals.
LJSpeech-1.1/mels/LJ024-0142.pt|I seek to make American democracy succeed.
LJSpeech-1.1/mels/LJ050-0177.pt|This PRS agent will also be responsible for establishing an informal local liaison committee
LJSpeech-1.1/mels/LJ011-0006.pt|He went to the bank, and found that no stocks stood in her name. He called at once upon Fauntleroy, his client's bankers, for an explanation,
LJSpeech-1.1/mels/LJ029-0012.pt|He had made only a few brief visits to the State since the nineteen sixty Presidential campaign and in nineteen sixty-two he began to consider a formal visit.
LJSpeech-1.1/mels/LJ026-0022.pt|if chlorophyll is absent, carbon is obtained from sugar or some similar compound,
LJSpeech-1.1/mels/LJ019-0233.pt|and when it was completed, both sides of the prison were brought into harmony with modern ideas.
LJSpeech-1.1/mels/LJ010-0096.pt|Edgeware Road, completing their dispositions for assuming supreme power after the blow had been struck.
LJSpeech-1.1/mels/LJ045-0111.pt|They asked for Lee Oswald who was not called to the telephone because he was known by the other name.
LJSpeech-1.1/mels/LJ005-0298.pt|to the county jails from such prisons as were past improvement, and that the borough funds should be charged for the accommodation.
LJSpeech-1.1/mels/LJ009-0224.pt|At the first-named the exhibition nearly created a tumult, and the body was taken down and buried,
LJSpeech-1.1/mels/LJ014-0179.pt|a working jeweler, shopman to a Mr. Berry of Parliament Street. It was Cope's duty to stay in the shop till the last, close the shutters,
LJSpeech-1.1/mels/LJ035-0044.pt|If the man had passed from the vestibule into the lunchroom, Baker could not have seen him.
LJSpeech-1.1/mels/LJ008-0113.pt|and his soul shot out so piercingly through the port-holes of his head, that the first glance of him nearly petrified me
LJSpeech-1.1/mels/LJ050-0087.pt|propensity toward violent action, or some similar characteristic, coupled with some evaluation of the capability of the individual or group
LJSpeech-1.1/mels/LJ047-0135.pt|According to the information received by the Bureau
LJSpeech-1.1/mels/LJ049-0066.pt|For instance, the lead car always is manned by Secret Service agents familiar with the area and with local law enforcement officials;
LJSpeech-1.1/mels/LJ030-0005.pt|by helicopter at ten:forty-five A.M., Eastern Standard Time, on November twenty-one, nineteen sixty-three, for Andrews Air Force Base.
LJSpeech-1.1/mels/LJ027-0158.pt|But according to the opposite view no reason can be assigned why such should be the case.
LJSpeech-1.1/mels/LJ048-0225.pt|they had little opportunity to eat during the day. No food was available at the Press Club.
LJSpeech-1.1/mels/LJ033-0149.pt|the FBI Laboratory developed a latent palmprint and latent fingerprint on the bag.
LJSpeech-1.1/mels/LJ018-0255.pt|The case was easily and rapidly disposed of.
LJSpeech-1.1/mels/LJ014-0276.pt|Watts's crime was discovered by the secretary of the Globe Company, who came suddenly upon the extensive falsification of the passbook.
LJSpeech-1.1/mels/LJ039-0219.pt|Frazier testified that the rifle was accurate, that it had less recoil than the average military rifle
LJSpeech-1.1/mels/LJ036-0213.pt|The man's general description was similar to the one broadcast over the police radio.
LJSpeech-1.1/mels/LJ037-0179.pt|was discarded along with the others as Oswald left the scene.
LJSpeech-1.1/mels/LJ037-0009.pt|One witness felt he was too distant from the gunman to make a positive identification.
LJSpeech-1.1/mels/LJ038-0163.pt|Prior attempt to kill.
LJSpeech-1.1/mels/LJ006-0139.pt|Nobody interfered with them or regulated their conduct. They might get drunk when so disposed, and did so frequently, alone or in company.
LJSpeech-1.1/mels/LJ039-0091.pt|Sergeant Zahm expressed the opinion that the shot which struck President Kennedy in the neck at one hundred seventy-six point nine
LJSpeech-1.1/mels/LJ036-0016.pt|Lee Harvey Oswald left the building approximately three minutes after the assassination.
LJSpeech-1.1/mels/LJ030-0109.pt|The Vice-Presidential car
LJSpeech-1.1/mels/LJ019-0030.pt|Major, afterwards Sir Joshua Jebb,
LJSpeech-1.1/mels/LJ015-0154.pt|When the crash came there were pensioners and other recipients of his bounty who could not believe
LJSpeech-1.1/mels/LJ038-0039.pt|Three other officers, moving toward the scuffle, grabbed Oswald from the front, rear and side.
LJSpeech-1.1/mels/LJ017-0146.pt|He had all the characteristics of the poisoner -- the calm deliberation,
LJSpeech-1.1/mels/LJ036-0171.pt|he would have arrived there about twelve:fifty-nine to one p.m.
LJSpeech-1.1/mels/LJ039-0099.pt|In accordance with standard Marine procedures, Oswald received extensive training in marksmanship.
LJSpeech-1.1/mels/LJ004-0216.pt|The most noticeable of the improvements introduced was a better regulation of dietaries within the prison.
LJSpeech-1.1/mels/LJ045-0136.pt|as Oswald went on to say. In Oswald's imagination, quote,
LJSpeech-1.1/mels/LJ004-0135.pt|twenty men slept on eight straw beds, with sixteen rugs amongst them, and a piece of timber for a bolster.
LJSpeech-1.1/mels/LJ045-0173.pt|Question: What did you say to that? Answer:
LJSpeech-1.1/mels/LJ040-0030.pt|When he was in the Soviet Union, he apparently resented the Communist Party members,
LJSpeech-1.1/mels/LJ024-0096.pt|No amendment which any powerful economic interests or the leaders of any powerful political party have had reason to oppose
LJSpeech-1.1/mels/LJ018-0208.pt|were a low lot, the lowest among criminals except, perhaps, the 'smashers,' or those who passed the counterfeit money.
LJSpeech-1.1/mels/LJ030-0215.pt|the car lurched forward, causing him to lose his footing. He ran three or four steps, regained his position and mounted the car.
LJSpeech-1.1/mels/LJ012-0156.pt|His arrest and conviction cast dismay over the whole gang of receivers, and for a time seriously checked the nefarious traffic.
LJSpeech-1.1/mels/LJ019-0028.pt|Mr. Shaw-Lefevre, the Speaker of the House of Commons, Sir Benjamin Brodie,
LJSpeech-1.1/mels/LJ019-0079.pt|The cells inhabited by prisoners were of very varying dimensions;
LJSpeech-1.1/mels/LJ046-0046.pt|In all of these roles the President must go to the people.
LJSpeech-1.1/mels/LJ018-0054.pt|While in the condemned cell he conversed freely with the warders in broken English or through an interpreter.
LJSpeech-1.1/mels/LJ014-0338.pt|These bankers, wishing for more specific information,
LJSpeech-1.1/mels/LJ026-0113.pt|Only proteid foods form new protoplasm
LJSpeech-1.1/mels/LJ015-0310.pt|which had received so perverted and mistaken direction,
LJSpeech-1.1/mels/LJ049-0040.pt|The assassination suggests that it would have been of prime importance
LJSpeech-1.1/mels/LJ022-0052.pt|here as in every other nation, we have come to recognize the possibility and the necessity of certain helpful remedial measures.
LJSpeech-1.1/mels/LJ032-0054.pt|"A. Hidell, P.O. Box two nine one five, Dallas, Texas," on March twenty, nineteen sixty-three.
LJSpeech-1.1/mels/LJ005-0290.pt|Instances rarely occur in which the borough jails admit of any proper classification of the prisoners.
LJSpeech-1.1/mels/LJ028-0314.pt|observing him, hastened down, and setting one of the gates slightly ajar, questioned him who he was, and on what errand he had come.
LJSpeech-1.1/mels/LJ028-0324.pt|his body red with marks of scourging and with blood, had no suspicion but that he spoke the truth, and was really come to be their friend and helper.
LJSpeech-1.1/mels/LJ033-0035.pt|and Marina Oswald testified that Oswald did not say anything about curtain rods on the day before the assassination.
LJSpeech-1.1/mels/LJ050-0082.pt|the interest of the Secret Service goes beyond information on individuals or groups threatening to cause harm or embarrassment to the President.
LJSpeech-1.1/mels/LJ046-0190.pt|it had arrangements to be notified about release from confinement in roughly one thousand cases;
LJSpeech-1.1/mels/LJ015-0096.pt|whether representing real or fictitious shares does not appear; but they were certificates connected in some way with Robson's long practiced frauds
LJSpeech-1.1/mels/LJ019-0146.pt|There was as yet no control over the prisoners after locking-up time;
LJSpeech-1.1/mels/LJ007-0091.pt|The lunatic became the sport of the idle and the depraved. His cure was out of the question;
LJSpeech-1.1/mels/LJ033-0087.pt|She thought that its color was similar to that of the bag found on the sixth floor of the School Book Depository after the assassination.
LJSpeech-1.1/mels/LJ050-0086.pt|Under these criteria, whether the case should be referred to the Secret Service depends on the existence of a previous history of mental instability,
LJSpeech-1.1/mels/LJ025-0011.pt|is Huxley's famous essay, "The Border Territory Between the Animal and Vegetable Kingdoms," written in eighteen seventy-six,
LJSpeech-1.1/mels/LJ003-0338.pt|End quote. it would cover some thirty acres, and cost a great deal more than the city, with the example of Whitecross Street prison before it,
LJSpeech-1.1/mels/LJ038-0282.pt|there is enough on it to say that it could have come, and even perhaps a little stronger, to say that it probably came from this,
LJSpeech-1.1/mels/LJ037-0075.pt|However, even in the absence of Mrs. Markham's testimony, there is ample evidence to identify Oswald as the killer of Tippit.
LJSpeech-1.1/mels/LJ003-0033.pt|Enough has been said, probably, to prove that there was room for improvement in the condition and treatment of debtors in the prisons of the city of London.
LJSpeech-1.1/mels/LJ041-0011.pt|Several witnesses testified that Lee Oswald was not aggressive. He was, however, involved in some fights.
LJSpeech-1.1/mels/LJ026-0102.pt|but root pressure due to osmosis, capillary action and evaporation from the leaves are factors.
LJSpeech-1.1/mels/LJ048-0078.pt|In each instance, liaison contacts should be developed to include a close friendly relationship,
|
PaddlePaddle/LanguageModeling/BERT/scripts | scripts | run_squad | # Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
echo "Container nvidia build = " $NVIDIA_BUILD_ID
init_checkpoint=${1:-"checkpoints/squad"}
epochs=${2:-"2"}
batch_size=${3:-"32"}
learning_rate=${4:-"4.6e-5"}
warmup_proportion=${5:-"0.2"}
precision=${6:-"amp"}
num_gpus=${7:-"8"}
seed=${8:-"1"}
squad_dir=${9:-"$BERT_PREP_WORKING_DIR/download/squad/v1.1"}
vocab_file=${10:-"vocab/bert-large-uncased-vocab.txt"}
OUT_DIR=${11:-"/results"}
mode=${12:-"train_eval"}
CONFIG_FILE=${13:-"None"}
max_steps=${14:-"-1"}
enable_benchmark=${15:-"false"}
benchmark_steps=${16:-"100"}
benchmark_warmup_steps=${17:-"100"}
fuse_mha=${18:-"true"}
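# All arguments are positional and fall back to the defaults above. A typical
# invocation (hypothetical paths, adjust to your environment) might look like:
#   bash scripts/run_squad.sh checkpoints/squad 2 32 4.6e-5 0.2 amp 8 1 \
#       $BERT_PREP_WORKING_DIR/download/squad/v1.1 vocab/bert-large-uncased-vocab.txt /results train_eval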
echo "out dir is $OUT_DIR"
mkdir -p $OUT_DIR
if [ ! -d "$OUT_DIR" ]; then
echo "ERROR: non existing $OUT_DIR"
exit 1
fi
amp=""
FUSE_MHA=""
if [ "$precision" = "amp" ] ; then
echo "amp activated!"
amp=" --amp --use-dynamic-loss-scaling --scale-loss=128.0"
if [ "$fuse_mha" = "true" ] ; then
FUSE_MHA="--fuse-mha"
fi
fi
CONFIG=""
if [ "$CONFIG_FILE" != "None" ] ; then
CONFIG="--config-file=$CONFIG_FILE"
fi
BENCH=""
if [ "$enable_benchmark" = "true" ] ; then
BENCH="--benchmark --benchmark-steps=$benchmark_steps --benchmark-warmup-steps=$benchmark_warmup_steps"
fi
unset CUDA_VISIBLE_DEVICES
case "$num_gpus" in
    [1-8])
        gpu_ids=$(seq -s, 0 $((num_gpus - 1)))
        CMD="python -m paddle.distributed.launch --gpus=$gpu_ids"
        ;;
    *)
        echo "Wrong number of gpus"
        exit 2
        ;;
esac
CMD+=" run_squad.py "
CMD+="--from-pretrained-params=$init_checkpoint "
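# Compose mode-specific flags: training only, evaluation only, prediction only, or combined train + eval.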
if [ "$mode" = "train" ] ; then
CMD+="--do-train "
CMD+="--train-file=$squad_dir/train-v1.1.json "
CMD+="--train-batch-size=$batch_size "
elif [ "$mode" = "eval" ] ; then
CMD+="--do-predict "
CMD+="--predict-file=$squad_dir/dev-v1.1.json "
CMD+="--predict-batch-size=$batch_size "
CMD+="--eval-script=$squad_dir/evaluate-v1.1.py "
CMD+="--do-eval "
elif [ "$mode" = "prediction" ] ; then
CMD+="--do-predict "
CMD+="--predict-file=$squad_dir/dev-v1.1.json "
CMD+="--predict-batch-size=$batch_size "
else
CMD+=" --do-train "
CMD+=" --train-file=$squad_dir/train-v1.1.json "
CMD+=" --train-batch-size=$batch_size "
CMD+="--do-predict "
CMD+="--predict-file=$squad_dir/dev-v1.1.json "
CMD+="--predict-batch-size=$batch_size "
CMD+="--eval-script=$squad_dir/evaluate-v1.1.py "
CMD+="--do-eval "
fi
CMD+=" --do-lower-case "
CMD+=" --bert-model=bert-large-uncased "
CMD+=" --learning-rate=$learning_rate "
CMD+=" --seed=$seed "
CMD+=" --epochs=$epochs "
CMD+=" --max-seq-length=384 "
CMD+=" --doc-stride=128 "
CMD+=" --output-dir=$OUT_DIR "
CMD+=" --vocab-file=$vocab_file "
CMD+=" $CONFIG "
CMD+=" --max-steps=$max_steps "
CMD+=" --optimizer=AdamW "
CMD+=" --log-freq=100 "
CMD+=" $amp "
CMD+=" $FUSE_MHA "
CMD+=" $BENCH "
CMD+=" --report-file $OUT_DIR/dllogger_${num_gpus}_${precision}.json "
LOGFILE=$OUT_DIR/logfile.txt
echo "$CMD |& tee $LOGFILE"
time $CMD |& tee $LOGFILE
|
TensorFlow/Detection/SSD/examples | examples | SSD320_FP32_inference | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PIPELINE_CONFIG_PATH=${1:-"/workdir/models/research/configs"}"/ssd320_full_1gpus.config"
SCRIPT_DIR=$(dirname "${BASH_SOURCE[0]}")
OBJECT_DETECTION=$(realpath $SCRIPT_DIR/../object_detection/)
export PYTHONPATH=$PYTHONPATH:$OBJECT_DETECTION
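# The first argument selects the config directory; any further arguments ("${@:2}"
# below) are forwarded unchanged to SSD320_inference.py.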
python $SCRIPT_DIR/SSD320_inference.py \
--pipeline_config_path=${PIPELINE_CONFIG_PATH} \
"${@:2}"
|
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/perf_analyzer | perf_analyzer | exceptions | class PerfAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
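# Typical use (illustrative, not part of the original module): wrap perf_analyzer
# failures so callers can catch a single exception type, e.g.
#   raise PerfAnalyzerException(f"perf_analyzer exited with code {retcode}")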
|
PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit | deployment_toolkit | dump | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Dict, Iterable
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
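# Arrays are buffered in memory per prefix and flushed to .npz files once any
# prefix's cache grows beyond FLUSH_THRESHOLD_B (256 MiB).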
def pad_except_batch_axis(data: np.ndarray, target_shape_with_batch_axis: Iterable[int]):
assert all(
[current_size <= target_size for target_size, current_size in zip(target_shape_with_batch_axis, data.shape)]
), "target_shape should have equal or greater all dimensions comparing to data.shape"
padding = [(0, 0)] + [ # (0, 0) - do not pad on batch_axis (with index 0)
(0, target_size - current_size)
for target_size, current_size in zip(target_shape_with_batch_axis[1:], data.shape[1:])
]
return np.pad(data, padding, "constant", constant_values=np.nan)
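# Illustration (not part of the original module): padding a (2, 3) array to the
# target shape (2, 5) leaves the batch axis untouched and fills the rest with NaN:
#   pad_except_batch_axis(np.zeros((2, 3)), (2, 5)).shape == (2, 5)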
class NpzWriter:
"""
Dumps dicts of numpy arrays into npz files
It can/shall be used as context manager:
```
with OutputWriter('mydir') as writer:
writer.write(outputs={'classes': np.zeros(8), 'probs': np.zeros((8, 4))},
labels={'classes': np.zeros(8)},
inputs={'input': np.zeros((8, 240, 240, 3)})
```
## Variable size data
Axes other than the batch axis may vary in size between writes: data is padded
with np.nan up to the largest shape seen, so each generated file may have a
different size along the dynamic axes.
"""
def __init__(self, output_dir, compress=False):
self._output_dir = Path(output_dir)
self._items_cache: Dict[str, Dict[str, np.ndarray]] = {}
self._items_counters: Dict[str, int] = {}
self._flush_threshold_b = FLUSH_THRESHOLD_B
self._compress = compress
@property
def cache_size(self):
return {name: sum([a.nbytes for a in data.values()]) for name, data in self._items_cache.items()}
def _append_to_cache(self, prefix, data):
if data is None:
return
if not isinstance(data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
cached_data = self._items_cache.get(prefix, {})
for name, value in data.items():
assert isinstance(
value, (list, np.ndarray)
), f"Values shall be lists or np.ndarrays; current type {type(value)}"
if not isinstance(value, np.ndarray):
value = np.array(value)
assert value.dtype.kind in ["S", "U"] or not np.any(
np.isnan(value)
), f"Values with np.nan are not supported; {name}={value}"
cached_value = cached_data.get(name, None)
if cached_value is not None:
target_shape = np.max([cached_value.shape, value.shape], axis=0)
cached_value = pad_except_batch_axis(cached_value, target_shape)
value = pad_except_batch_axis(value, target_shape)
value = np.concatenate((cached_value, value))
cached_data[name] = value
self._items_cache[prefix] = cached_data
def write(self, **kwargs):
"""
Writes named dictionaries of np.ndarrays.
The keyword names become the filename prefixes of the npz files in which those dictionaries are stored.
ex. writer.write(inputs={'input': np.zeros((2, 10))},
outputs={'classes': np.zeros((2,)), 'probabilities': np.zeros((2, 32))},
labels={'classes': np.zeros((2,))})
Args:
**kwargs: named list of dictionaries of np.ndarrays to store
"""
for prefix, data in kwargs.items():
self._append_to_cache(prefix, data)
biggest_item_size = max(self.cache_size.values())
if biggest_item_size > self._flush_threshold_b:
self.flush()
def flush(self):
for prefix, data in self._items_cache.items():
self._dump(prefix, data)
self._items_cache = {}
def _dump(self, prefix, data):
idx = self._items_counters.setdefault(prefix, 0)
filename = f"{prefix}-{idx:012d}.npz"
output_path = self._output_dir / filename
if self._compress:
np.savez_compressed(output_path, **data)
else:
np.savez(output_path, **data)
nitems = len(list(data.values())[0])
msg_for_labels = (
"If these are correct shapes - consider moving loading of them into metrics.py."
if prefix == "labels"
else ""
)
shapes = {name: value.shape if isinstance(value, np.ndarray) else (len(value),) for name, value in data.items()}
assert all(len(v) == nitems for v in data.values()), (
f'All items in "{prefix}" shall have same size on 0 axis equal to batch size. {msg_for_labels}'
f'{", ".join(f"{name}: {shape}" for name, shape in shapes.items())}'
)
self._items_counters[prefix] += nitems
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
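if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module).
    # "example_dump" is a hypothetical output directory; NpzWriter requires it to be
    # empty or missing, otherwise __enter__ raises ValueError.
    with NpzWriter("example_dump") as writer:
        for _ in range(4):
            writer.write(
                inputs={"input": np.zeros((8, 240, 240, 3))},
                outputs={"classes": np.zeros(8), "probs": np.zeros((8, 4))},
                labels={"classes": np.zeros(8)},
            )
    # Any batches still cached are flushed to example_dump/*.npz on exit.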
|
PyTorch/SpeechSynthesis/FastPitch/common | common | stft | """
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from common.audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :].copy())
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, size=filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
with torch.no_grad():
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase, self.inverse_basis,
stride=self.hop_length, padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
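# --- Illustrative usage: a small sketch added for clarity, not part of the
# original module. It round-trips a short buffer of random mono audio through
# the transform using the default constructor arguments above.
if __name__ == '__main__':
    stft = STFT(filter_length=800, hop_length=200, win_length=800, window='hann')
    audio = torch.randn(1, 16000)  # (batch, num_samples)
    magnitude, phase = stft.transform(audio)
    reconstruction = stft.inverse(magnitude, phase)
    print(magnitude.shape, phase.shape, reconstruction.shape)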
|
TensorFlow2/Recommendation | Recommendation | README | # Recommender Systems
Recommender systems are a type of information filtering system that seeks to predict the
"rating" or "preference" a user would give to an item. (Source:
[Wikipedia](https://en.wikipedia.org/wiki/Recommender_system))
In an era where users have to navigate through an exponentially growing number of goods and services, recommender systems have become key to driving user engagement, helping internet services personalize experiences for their users. They are ubiquitous and indispensable in commercial online platforms.
In this guide, you’ll find answers to how recommender systems work, how you might use them in your business, and more. Whether you’re an experienced machine learning engineer considering implementation, a developer wanting to learn more, or a product manager looking to explore what’s possible with recommender systems, this guide is for you.
Here is a look at what we will cover:
- Challenges and opportunities in recommender systems
- How do DL-based recommender systems work?
- Use cases and applications
## Challenges and opportunities in recommender systems
With the rapid growth in scale of industry datasets, deep learning (DL) recommender models have started to gain advantages over traditional methods by capitalizing on large amounts of training data. However, there are multiple challenges when it comes to performance of large-scale recommender systems solutions:
- Huge datasets: Commercial recommenders are trained on huge datasets, often several terabytes in scale.
- Complex data preprocessing and feature engineering pipelines: Datasets need to be preprocessed and transformed into a form relevant to be used with DL models and frameworks. In addition, feature engineering creates an extensive set of new features from existing ones, requiring multiple iterations to arrive at an optimal solution.
- Input bottleneck: Data loading, if not well optimized, can be the slowest part of the training process, leading to under-utilization of high-throughput computing devices such as GPUs.
- Extensive repeated experimentation: The whole data engineering, training, and evaluation process is generally repeated many times, requiring significant time and computational resources.
To meet the computational demands for large-scale DL recommender systems training and inference, recommender-on-GPU solutions aim to provide fast feature engineering and high training throughput (to enable both fast experimentation and production retraining), as well as low latency, high-throughput inference.
Current DL–based models for recommender systems include the [Wide and
Deep](https://arxiv.org/abs/1606.07792) model, Deep Learning Recommendation Model
([DLRM](https://github.com/facebookresearch/dlrm)), neural collaborative filtering
([NCF](https://arxiv.org/abs/1708.05031)), Variational Autoencoder
([VAE](https://arxiv.org/abs/1802.05814)) for Collaborative Filtering, and
[BERT4Rec](https://arxiv.org/pdf/1904.06690.pdf), among others.
## How do DL-based recommender systems work?
In [NVIDIA Deep Learning Examples](https://github.com/NVIDIA/DeepLearningExamples), we introduce several popular state-of-the-art DL-based recommender models in Tensorflow and PyTorch.
As an example, let us start by discussing our reference implementation of DLRM. With DLRM, we systematically tackle the challenges mentioned above by designing a complete DLRM pipeline, from data preparation to training to production inference. We provide ready-to-go Docker images for training and inference, data downloading and preprocessing tools, and Jupyter demo notebooks to get you started quickly. Also, trained models can be prepared for production inference in one simple step with our exporter tool.
For more details on the model architectures, example code, and how to set up the end-to-end data processing, training, and inference pipeline on GPU, please refer to the [DLRM developer blog](https://developer.nvidia.com/blog/optimizing-dlrm-on-nvidia-gpus/) and the [NVIDIA GPU-accelerated DL model portfolio](https://github.com/NVIDIA/DeepLearningExamples) under /PyTorch/Recommendation/DLRM.
In addition, DLRM forms part of NVIDIA [Merlin](https://developer.nvidia.com/nvidia-merlin), a framework for building high-performance, DL–based recommender systems.
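To make the embedding-plus-MLP structure that these models share more concrete, the sketch below shows a deliberately tiny Keras model. It is illustrative only: the feature cardinalities, embedding dimension, and layer widths are made up, and it is not the reference DLRM implementation. It does capture the core idea, though: categorical features are mapped to dense embeddings, concatenated with the numerical features, and scored by an MLP.
```python
import tensorflow as tf
# Hypothetical feature specification: three categorical features with the given
# cardinalities and two numerical features.
CARDINALITIES = [1000, 500, 100]
EMBEDDING_DIM = 16
NUM_NUMERICAL = 2
class TinyRecommender(tf.keras.Model):
    """Embeds categorical features, concatenates them with the numerical ones,
    and scores the interaction with a small MLP."""
    def __init__(self):
        super().__init__()
        self.embeddings = [
            tf.keras.layers.Embedding(cardinality, EMBEDDING_DIM)
            for cardinality in CARDINALITIES
        ]
        self.mlp = tf.keras.Sequential([
            tf.keras.layers.Dense(64, activation="relu"),
            tf.keras.layers.Dense(1),  # logit of the interaction probability
        ])
    def call(self, numerical, categorical):
        embedded = [emb(categorical[:, i]) for i, emb in enumerate(self.embeddings)]
        features = tf.concat(embedded + [numerical], axis=1)
        return self.mlp(features)
model = TinyRecommender()
numerical = tf.random.uniform((32, NUM_NUMERICAL))
categorical = tf.stack(
    [tf.random.uniform((32,), maxval=c, dtype=tf.int32) for c in CARDINALITIES],
    axis=1)
logits = model(numerical, categorical)  # shape: (32, 1)
```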
## Use cases and applications
### E-Commerce & Retail: Personalized Merchandising
Imagine a user has already purchased a scarf. Why not offer a hat that matches this scarf, so that the look is complete? This feature is often implemented by means of AI-based algorithms as “Complete the look” or “You might also like” sections in e-commerce platforms like Amazon, Walmart, Target, and many others.
On average, an intelligent recommender system delivers a [22.66% lift in conversion rates](https://brandcdn.exacttarget.com/sites/exacttarget/files/deliverables/etmc-predictiveintelligencebenchmarkreport.pdf) for web products.
### Media & Entertainment: Personalized Content
AI-based recommender engines can analyze an individual's purchase behavior and detect patterns that help provide a given user with the content suggestions most likely to match his or her interests. This is what Google and Facebook actively apply when recommending ads, and what Netflix does behind the scenes when recommending movies and TV shows.
### Personalized Banking
A mass-market product that is consumed digitally by millions, banking is a prime candidate for recommendations. Knowing a customer’s detailed financial situation and their past preferences, coupled with data from thousands of similar users, is quite powerful.
|
TensorFlow2/Recommendation/DLRM_and_DCNv2/nn | nn | trainer | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel (tgrel@nvidia.com)
import tensorflow as tf
import horovod.tensorflow as hvd
from distributed_embeddings.python.layers import dist_model_parallel as dmp
from .nn_utils import create_inputs_dict
class Trainer:
def __init__(self, model, embedding_optimizer, mlp_optimizer, amp, lr_scheduler, tf_dataset_op, cpu):
self.model = model
self.embedding_optimizer = embedding_optimizer
self.mlp_optimizer = mlp_optimizer
self.amp = amp
self.lr_scheduler = lr_scheduler
self.bce = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE, from_logits=True)
self.cpu = cpu
self.tf_dataset_op = tf_dataset_op
self.dataset_iter = iter(self.tf_dataset_op())
def _weight_update(self, gradients):
if self.amp:
gradients = self.mlp_optimizer.get_unscaled_gradients(gradients)
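        # Split the gradients into dense (MLP) and sparse (embedding) groups so
        # that each group is applied by its dedicated optimizer below.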
dense_gradients, dense_variables = [], []
embedding_gradients, embedding_variables = [], []
embedding_refs = set(v.ref() for v in self.model.sparse_model.trainable_variables)
for var, grad in zip(self.model.trainable_variables, gradients):
if var.ref() in embedding_refs:
embedding_variables.append(var)
embedding_gradients.append(grad)
else:
dense_variables.append(var)
dense_gradients.append(grad)
self.mlp_optimizer.apply_gradients(zip(dense_gradients, dense_variables))
self.embedding_optimizer.apply_gradients(zip(embedding_gradients, embedding_variables))
@tf.function
def train_step(self):
device = '/CPU:0' if self.cpu else '/GPU:0'
with tf.device(device):
self.lr_scheduler()
with tf.name_scope("dataloading"):
(numerical_features, categorical_features), labels = self.dataset_iter.get_next()
inputs = create_inputs_dict(numerical_features, categorical_features)
with tf.GradientTape() as tape:
predictions = self.model(inputs=inputs, training=True)
unscaled_loss = self.bce(labels, predictions)
# tf keras doesn't reduce the loss when using a Custom Training Loop
unscaled_loss = tf.math.reduce_mean(unscaled_loss)
scaled_loss = self.mlp_optimizer.get_scaled_loss(unscaled_loss) if self.amp else unscaled_loss
if hvd.size() > 1:
tape = dmp.DistributedGradientTape(tape)
gradients = tape.gradient(scaled_loss, self.model.trainable_variables)
self._weight_update(gradients)
if hvd.size() > 1:
# compute mean loss for all workers for reporting
mean_loss = hvd.allreduce(unscaled_loss, name="mean_loss", op=hvd.Average)
else:
mean_loss = unscaled_loss
return mean_loss
|
PyTorch/SpeechSynthesis/Tacotron2 | Tacotron2 | train | # *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import time
import argparse
import numpy as np
from contextlib import contextmanager
import torch
from torch.utils.data import DataLoader
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
import models
import loss_functions
import data_functions
from tacotron2_common.utils import ParseFromConfigFile
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save checkpoints')
parser.add_argument('-d', '--dataset-path', type=str,
default='./', help='Path to dataset')
parser.add_argument('-m', '--model-name', type=str, default='', required=True,
help='Model to train')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--anneal-steps', nargs='*',
help='Epochs after which decrease learning rate')
parser.add_argument('--anneal-factor', type=float, choices=[0.1, 0.3], default=0.1,
help='Factor for annealing learning rate')
parser.add_argument('--config-file', action=ParseFromConfigFile,
type=str, help='Path to configuration file')
parser.add_argument('--seed', default=None, type=int,
help='Seed for random number generators')
# training
training = parser.add_argument_group('training setup')
training.add_argument('--epochs', type=int, required=True,
help='Number of total epochs to run')
training.add_argument('--epochs-per-checkpoint', type=int, default=50,
help='Number of epochs per checkpoint')
training.add_argument('--checkpoint-path', type=str, default='',
help='Checkpoint path to resume training')
training.add_argument('--resume-from-last', action='store_true',
help='Resumes training from the last checkpoint; uses the directory provided with \'--output\' option to search for the checkpoint \"checkpoint_<model_name>_last.pt\"')
training.add_argument('--dynamic-loss-scaling', type=bool, default=True,
help='Enable dynamic loss scaling')
training.add_argument('--amp', action='store_true',
help='Enable AMP')
training.add_argument('--cudnn-enabled', action='store_true',
help='Enable cudnn')
training.add_argument('--cudnn-benchmark', action='store_true',
help='Run cudnn benchmark')
training.add_argument('--disable-uniform-initialize-bn-weight', action='store_true',
help='disable uniform initialization of batchnorm layer weight')
optimization = parser.add_argument_group('optimization setup')
optimization.add_argument(
'--use-saved-learning-rate', default=False, type=bool)
optimization.add_argument('-lr', '--learning-rate', type=float, required=True,
                              help='Learning rate')
optimization.add_argument('--weight-decay', default=1e-6, type=float,
help='Weight decay')
optimization.add_argument('--grad-clip-thresh', default=1.0, type=float,
help='Clip threshold for gradients')
optimization.add_argument('-bs', '--batch-size', type=int, required=True,
help='Batch size per GPU')
optimization.add_argument('--grad-clip', default=5.0, type=float,
help='Enables gradient clipping and sets maximum gradient norm value')
# dataset parameters
dataset = parser.add_argument_group('dataset parameters')
dataset.add_argument('--load-mel-from-disk', action='store_true',
help='Loads mel spectrograms from disk instead of computing them on the fly')
dataset.add_argument('--training-files',
default='filelists/ljs_audio_text_train_filelist.txt',
type=str, help='Path to training filelist')
dataset.add_argument('--validation-files',
default='filelists/ljs_audio_text_val_filelist.txt',
type=str, help='Path to validation filelist')
dataset.add_argument('--text-cleaners', nargs='*',
default=['english_cleaners'], type=str,
help='Type of text cleaners for input text')
# audio parameters
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--max-wav-value', default=32768.0, type=float,
                       help='Maximum audio waveform value')
audio.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
audio.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win-length', default=1024, type=int,
help='Window length')
audio.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
distributed = parser.add_argument_group('distributed setup')
# distributed.add_argument('--distributed-run', default=True, type=bool,
# help='enable distributed run')
distributed.add_argument('--rank', default=0, type=int,
help='Rank of the process, do not set! Done by multiproc module')
distributed.add_argument('--world-size', default=1, type=int,
help='Number of processes, do not set! Done by multiproc module')
distributed.add_argument('--dist-url', type=str, default='tcp://localhost:23456',
help='Url used to set up distributed training')
distributed.add_argument('--group-name', type=str, default='group_name',
required=False, help='Distributed group name')
distributed.add_argument('--dist-backend', default='nccl', type=str, choices={'nccl'},
help='Distributed run backend')
benchmark = parser.add_argument_group('benchmark')
benchmark.add_argument('--bench-class', type=str, default='')
return parser
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
if rt.is_floating_point():
rt = rt/num_gpus
else:
rt = torch.div(rt, num_gpus, rounding_mode='floor')
return rt
def init_distributed(args, world_size, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(
backend=args.dist_backend, init_method=args.dist_url,
world_size=world_size, rank=rank, group_name=group_name)
print("Done initializing distributed")
def save_checkpoint(model, optimizer, scaler, epoch, config, output_dir,
model_name, local_rank, world_size):
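    # RNG states are moved to the GPU because NCCL all_gather operates on CUDA
    # tensors; gathering them from every rank lets training resume with
    # per-rank reproducibility.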
random_rng_state = torch.random.get_rng_state().cuda()
cuda_rng_state = torch.cuda.get_rng_state(local_rank).cuda()
random_rng_states_all = [torch.empty_like(random_rng_state) for _ in range(world_size)]
cuda_rng_states_all = [torch.empty_like(cuda_rng_state) for _ in range(world_size)]
if world_size > 1:
dist.all_gather(random_rng_states_all, random_rng_state)
dist.all_gather(cuda_rng_states_all, cuda_rng_state)
else:
random_rng_states_all = [random_rng_state]
cuda_rng_states_all = [cuda_rng_state]
random_rng_states_all = torch.stack(random_rng_states_all).cpu()
cuda_rng_states_all = torch.stack(cuda_rng_states_all).cpu()
if local_rank == 0:
checkpoint = {'epoch': epoch,
'cuda_rng_state_all': cuda_rng_states_all,
'random_rng_states_all': random_rng_states_all,
'config': config,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'scaler': scaler.state_dict()}
checkpoint_filename = "checkpoint_{}_{}.pt".format(model_name, epoch)
checkpoint_path = os.path.join(output_dir, checkpoint_filename)
print("Saving model and optimizer state at epoch {} to {}".format(
epoch, checkpoint_path))
torch.save(checkpoint, checkpoint_path)
symlink_src = checkpoint_filename
symlink_dst = os.path.join(
output_dir, "checkpoint_{}_last.pt".format(model_name))
if os.path.exists(symlink_dst) and os.path.islink(symlink_dst):
print("Updating symlink", symlink_dst, "to point to", symlink_src)
os.remove(symlink_dst)
os.symlink(symlink_src, symlink_dst)
def get_last_checkpoint_filename(output_dir, model_name):
symlink = os.path.join(output_dir, "checkpoint_{}_last.pt".format(model_name))
if os.path.exists(symlink):
print("Loading checkpoint from symlink", symlink)
return os.path.join(output_dir, os.readlink(symlink))
else:
print("No last checkpoint available - starting from epoch 0 ")
return ""
def load_checkpoint(model, optimizer, scaler, epoch, filepath, local_rank):
checkpoint = torch.load(filepath, map_location='cpu')
epoch[0] = checkpoint['epoch']+1
device_id = local_rank % torch.cuda.device_count()
torch.cuda.set_rng_state(checkpoint['cuda_rng_state_all'][device_id])
if 'random_rng_states_all' in checkpoint:
torch.random.set_rng_state(checkpoint['random_rng_states_all'][device_id])
elif 'random_rng_state' in checkpoint:
torch.random.set_rng_state(checkpoint['random_rng_state'])
else:
raise Exception("Model checkpoint must have either 'random_rng_state' or 'random_rng_states_all' key.")
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scaler.load_state_dict(checkpoint['scaler'])
return checkpoint['config']
# adapted from: https://discuss.pytorch.org/t/opinion-eval-should-be-a-context-manager/18998/3
# Following snippet is licensed under MIT license
@contextmanager
def evaluating(model):
'''Temporarily switch to evaluation mode.'''
istrain = model.training
try:
model.eval()
yield model
finally:
if istrain:
model.train()
def validate(model, criterion, valset, epoch, batch_iter, batch_size,
world_size, collate_fn, distributed_run, perf_bench, batch_to_gpu, amp_run):
"""Handles all the validation scoring and printing"""
with evaluating(model), torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, num_workers=1, shuffle=False,
sampler=val_sampler,
batch_size=batch_size, pin_memory=False,
collate_fn=collate_fn,
drop_last=(True if perf_bench else False))
val_loss = 0.0
num_iters = 0
val_items_per_sec = 0.0
for i, batch in enumerate(val_loader):
torch.cuda.synchronize()
iter_start_time = time.perf_counter()
x, y, num_items = batch_to_gpu(batch)
#AMP upstream autocast
with torch.cuda.amp.autocast(enabled=amp_run):
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, world_size).item()
reduced_num_items = reduce_tensor(num_items.data, 1).item()
            else:
reduced_val_loss = loss.item()
reduced_num_items = num_items.item()
val_loss += reduced_val_loss
torch.cuda.synchronize()
iter_stop_time = time.perf_counter()
iter_time = iter_stop_time - iter_start_time
items_per_sec = reduced_num_items/iter_time
DLLogger.log(step=(epoch, batch_iter, i), data={'val_items_per_sec': items_per_sec})
val_items_per_sec += items_per_sec
num_iters += 1
val_loss = val_loss/num_iters
val_items_per_sec = val_items_per_sec/num_iters
DLLogger.log(step=(epoch,), data={'val_loss': val_loss})
DLLogger.log(step=(epoch,), data={'val_items_per_sec': val_items_per_sec})
return val_loss, val_items_per_sec
def adjust_learning_rate(iteration, epoch, optimizer, learning_rate,
anneal_steps, anneal_factor, rank):
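    # p counts how many annealing milestones have been passed; the learning rate
    # is decayed once per milestone (with a special alternating 0.3/0.1 pattern
    # when anneal_factor == 0.3).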
p = 0
if anneal_steps is not None:
for i, a_step in enumerate(anneal_steps):
if epoch >= int(a_step):
p = p+1
if anneal_factor == 0.3:
lr = learning_rate*((0.1 ** (p//2))*(1.0 if p % 2 == 0 else 0.3))
else:
lr = learning_rate*(anneal_factor ** p)
if optimizer.param_groups[0]['lr'] != lr:
DLLogger.log(step=(epoch, iteration), data={'learning_rate changed': str(optimizer.param_groups[0]['lr'])+" -> "+str(lr)})
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def main():
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Training')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
if 'LOCAL_RANK' in os.environ and 'WORLD_SIZE' in os.environ:
local_rank = int(os.environ['LOCAL_RANK'])
world_size = int(os.environ['WORLD_SIZE'])
else:
local_rank = args.rank
world_size = args.world_size
distributed_run = world_size > 1
if args.seed is not None:
torch.manual_seed(args.seed + local_rank)
np.random.seed(args.seed + local_rank)
if local_rank == 0:
log_file = os.path.join(args.output, args.log_file)
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, log_file),
StdOutBackend(Verbosity.VERBOSE)])
else:
DLLogger.init(backends=[])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
DLLogger.metadata('run_time', {'unit': 's'})
DLLogger.metadata('val_loss', {'unit': None})
DLLogger.metadata('train_items_per_sec', {'unit': 'items/s'})
DLLogger.metadata('val_items_per_sec', {'unit': 'items/s'})
model_name = args.model_name
parser = models.model_parser(model_name, parser)
args, _ = parser.parse_known_args()
torch.backends.cudnn.enabled = args.cudnn_enabled
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if distributed_run:
init_distributed(args, world_size, local_rank, args.group_name)
torch.cuda.synchronize()
run_start_time = time.perf_counter()
model_config = models.get_model_config(model_name, args)
model = models.get_model(model_name, model_config,
cpu_run=False,
uniform_initialize_bn_weight=not args.disable_uniform_initialize_bn_weight)
if distributed_run:
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate,
weight_decay=args.weight_decay)
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
try:
sigma = args.sigma
except AttributeError:
sigma = None
start_epoch = [0]
if args.resume_from_last:
args.checkpoint_path = get_last_checkpoint_filename(args.output, model_name)
if args.checkpoint_path != "":
model_config = load_checkpoint(model, optimizer, scaler, start_epoch,
args.checkpoint_path, local_rank)
start_epoch = start_epoch[0]
criterion = loss_functions.get_loss_function(model_name, sigma)
try:
n_frames_per_step = args.n_frames_per_step
except AttributeError:
n_frames_per_step = None
collate_fn = data_functions.get_collate_function(
model_name, n_frames_per_step)
trainset = data_functions.get_data_loader(
model_name, args.dataset_path, args.training_files, args)
if distributed_run:
train_sampler = DistributedSampler(trainset, seed=(args.seed or 0))
shuffle = False
else:
train_sampler = None
shuffle = True
train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle,
sampler=train_sampler,
batch_size=args.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
valset = data_functions.get_data_loader(
model_name, args.dataset_path, args.validation_files, args)
batch_to_gpu = data_functions.get_batch_to_gpu(model_name)
iteration = 0
train_epoch_items_per_sec = 0.0
val_loss = 0.0
num_iters = 0
model.train()
for epoch in range(start_epoch, args.epochs):
torch.cuda.synchronize()
epoch_start_time = time.perf_counter()
# used to calculate avg items/sec over epoch
reduced_num_items_epoch = 0
train_epoch_items_per_sec = 0.0
num_iters = 0
reduced_loss = 0
if distributed_run:
train_loader.sampler.set_epoch(epoch)
for i, batch in enumerate(train_loader):
torch.cuda.synchronize()
iter_start_time = time.perf_counter()
DLLogger.log(step=(epoch, i),
data={'glob_iter/iters_per_epoch': str(iteration)+"/"+str(len(train_loader))})
adjust_learning_rate(iteration, epoch, optimizer, args.learning_rate,
args.anneal_steps, args.anneal_factor, local_rank)
model.zero_grad()
x, y, num_items = batch_to_gpu(batch)
#AMP upstream autocast
with torch.cuda.amp.autocast(enabled=args.amp):
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_loss = reduce_tensor(loss.data, world_size).item()
reduced_num_items = reduce_tensor(num_items.data, 1).item()
else:
reduced_loss = loss.item()
reduced_num_items = num_items.item()
if np.isnan(reduced_loss):
raise Exception("loss is NaN")
DLLogger.log(step=(epoch,i), data={'train_loss': reduced_loss})
num_iters += 1
# accumulate number of items processed in this epoch
reduced_num_items_epoch += reduced_num_items
if args.amp:
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip_thresh)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip_thresh)
optimizer.step()
model.zero_grad(set_to_none=True)
torch.cuda.synchronize()
iter_stop_time = time.perf_counter()
iter_time = iter_stop_time - iter_start_time
items_per_sec = reduced_num_items/iter_time
train_epoch_items_per_sec += items_per_sec
DLLogger.log(step=(epoch, i), data={'train_items_per_sec': items_per_sec})
DLLogger.log(step=(epoch, i), data={'train_iter_time': iter_time})
iteration += 1
torch.cuda.synchronize()
epoch_stop_time = time.perf_counter()
epoch_time = epoch_stop_time - epoch_start_time
DLLogger.log(step=(epoch,), data={'train_items_per_sec':
(train_epoch_items_per_sec/num_iters if num_iters > 0 else 0.0)})
DLLogger.log(step=(epoch,), data={'train_loss': reduced_loss})
DLLogger.log(step=(epoch,), data={'train_epoch_time': epoch_time})
val_loss, val_items_per_sec = validate(model, criterion, valset, epoch,
iteration, args.batch_size,
world_size, collate_fn,
distributed_run, args.bench_class=="perf-train",
batch_to_gpu,
args.amp)
if (epoch % args.epochs_per_checkpoint == 0) and (args.bench_class == "" or args.bench_class == "train"):
save_checkpoint(model, optimizer, scaler, epoch, model_config,
args.output, args.model_name, local_rank, world_size)
if local_rank == 0:
DLLogger.flush()
torch.cuda.synchronize()
run_stop_time = time.perf_counter()
run_time = run_stop_time - run_start_time
DLLogger.log(step=tuple(), data={'run_time': run_time})
DLLogger.log(step=tuple(), data={'val_loss': val_loss})
DLLogger.log(step=tuple(), data={'train_loss': reduced_loss})
DLLogger.log(step=tuple(), data={'train_items_per_sec':
(train_epoch_items_per_sec/num_iters if num_iters > 0 else 0.0)})
DLLogger.log(step=tuple(), data={'val_items_per_sec': val_items_per_sec})
if local_rank == 0:
DLLogger.flush()
if __name__ == '__main__':
main()
|
TensorFlow/Classification/ConvNets/se-resnext101-32x4d/training | training | DGX2_SE-RNxt101-32x4d_AMP_250E | #!/bin/bash
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKSPACE=${1:-"/workspace/rn50v15_tf"}
DATA_DIR=${2:-"/data"}
OTHER=${@:3}
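# Illustrative invocation: bash DGX2_SE-RNxt101-32x4d_AMP_250E.sh /workspace/rn50v15_tf /data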
if [[ ! -z "${BIND_TO_SOCKET}" ]]; then
BIND_TO_SOCKET="--bind-to socket"
fi
mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=se-resnext101-32x4d \
--mode=train_and_evaluate --iter_unit=epoch --num_iter=250 --mixup=0.2 \
--batch_size=96 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \
--lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=6.103515625e-05 \
--amp --static_loss_scale 128 \
--data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \
--results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
|
PyTorch/Translation/Transformer/scripts | scripts | run_preprocessing | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DATASET_DIR=/data/wmt14_en_de_joined_dict
TEXT=examples/translation/wmt14_en_de
(
cd examples/translation
bash prepare-wmt14en2de.sh --scaling18
)
python preprocess.py \
--source-lang en \
--target-lang de \
--trainpref $TEXT/train \
--validpref $TEXT/valid \
--testpref $TEXT/test \
--destdir ${DATASET_DIR} \
--nwordssrc 33712 \
--nwordstgt 33712 \
--joined-dictionary
cp $TEXT/code $DATASET_DIR/code
cp $TEXT/tmp/valid.raw.de $DATASET_DIR/valid.raw.de
sacrebleu -t wmt14/full -l en-de --echo ref > $DATASET_DIR/test.raw.de
|
TensorFlow2/Recommendation/WideAndDeep/triton/runner | runner | requirements | # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
dataclasses>=0.6; python_version<'3.7'
tqdm>=4.44.1
docker==5.0.0
colorama==0.4.4
pytz==2021.1
coloredlogs==15.0.1
py-cpuinfo==8.0.0
psutil==5.8.0
retrying>=1.3.3 |
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util | util | CharacterMappingReader | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CharacterMappingReader.hpp"
#include <cassert>
#include <cctype>
#include <fstream>
#include <stdexcept>
namespace tts
{
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
bool isBlank(const std::string& line)
{
for (const unsigned char c : line) {
if (!std::isspace(c)) {
return false;
}
}
return true;
}
bool isComment(const std::string& line)
{
for (const unsigned char c : line) {
if (std::isspace(c)) {
// keep searching
    } else if (c == '#') {
      return true;
    } else {
      // the first non-whitespace character is not '#', so this is not a comment
      return false;
    }
}
return false;
}
void parseKeyPair(
const std::string& line, int* const num, std::string* const symbol)
{
assert(num != nullptr);
assert(symbol != nullptr);
for (size_t i = 1; i + 1 < line.size(); ++i) {
if (std::isspace(static_cast<unsigned char>(line[i]))) {
// a valid key pair will be a number, a whitespace, and the rest will be
// treated as the symbol.
*num = std::stol(line.substr(0, i));
*symbol = line.substr(i + 1);
return;
}
}
// if we found no space
throw std::runtime_error("Failed to parse line '" + line + "'.");
}
} // namespace
/******************************************************************************
* PUBLIC STATIC METHODS ******************************************************
*****************************************************************************/
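// Expected mapping file format (illustrative example, not taken from a real
// mapping file): one "<number> <symbol>" pair per line, for example
//   # comment line
//   38 a
//   39 b
// Blank lines and lines whose first non-whitespace character is '#' are
// skipped.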
CharacterMapping
CharacterMappingReader::loadFromFile(const std::string& filename)
{
std::ifstream fin(filename);
if (!fin.good()) {
throw std::runtime_error("Failed to open '" + filename + "'.");
}
// read the file line by line
CharacterMapping mapping;
std::string line;
std::string symbol;
int num;
while (std::getline(fin, line)) {
if (isBlank(line)) {
// do nothing
} else if (isComment(line)) {
// do nothing
} else {
parseKeyPair(line, &num, &symbol);
mapping.set(symbol, num);
}
}
if (fin.bad()) {
throw std::runtime_error("Error while reading '" + filename + "'.");
}
return mapping;
}
} // namespace tts
|
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/callbacks/callbacks | callbacks | save_best_checkpoint | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_target_: callbacks.ctl_callbacks.SaveBestCheckpoint
metric: val_loss
|
TensorFlow/LanguageModeling/BERT | BERT | .gitignore | # Initially taken from Github's Python gitignore file
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
*.png
.idea/
*swp
data/
checkpoints/
data_dl/
# C extensions
*.so
#Data
data/download
data/extracted
data/formatted_one_article_per_line
data/sharded
data/hdf5*
data/tfrecord*
data/*/*.zip
#Results
results/
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
.vscode/
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# TensorRT
*.engine
models/
bertSquadCalibCache
predictions.json
|
PyTorch/SpeechSynthesis/HiFiGAN/scripts | scripts | inference_example | #!/usr/bin/env bash
export CUDNN_V8_API_ENABLED=1 # Keep the flag for older containers
export TORCH_CUDNN_V8_API_ENABLED=1
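# Illustrative invocation (every variable below is optional and falls back to
# the defaults shown): AMP=true BATCH_SIZE=32 bash scripts/inference_example.sh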
: ${DATASET_DIR:="data/LJSpeech-1.1"}
: ${BATCH_SIZE:=16}
: ${FILELIST:="data/filelists/devset10.tsv"}
: ${AMP:=false}
: ${TORCHSCRIPT:=false}
: ${WARMUP:=0}
: ${REPEATS:=1}
: ${CUDA:=true}
: ${CUDNN_BENCHMARK:=false} # better performance, but takes a while to warm-up
: ${PHONE:=true}
# : ${FASTPITCH=""} # Disable mel-spec generator and synthesize from ground truth mels
# : ${HIFIGAN="pretrained_models/hifigan/hifigan_gen_checkpoint_6500.pt"} # Clean HiFi-GAN model
# Mel-spectrogram generator (optional)
: ${FASTPITCH="pretrained_models/fastpitch/nvidia_fastpitch_210824.pt"}
# Vocoder; set only one
: ${HIFIGAN="pretrained_models/hifigan/hifigan_gen_checkpoint_10000_ft.pt"} # Finetuned for FastPitch
: ${WAVEGLOW=""}
# Download pre-trained checkpoints
[[ "$HIFIGAN" == "pretrained_models/hifigan/hifigan_gen_checkpoint_6500.pt" && ! -f "$HIFIGAN" ]] && { echo "Downloading $HIFIGAN from NGC..."; bash scripts/download_models.sh hifigan; }
[[ "$HIFIGAN" == "pretrained_models/hifigan/hifigan_gen_checkpoint_10000_ft.pt" && ! -f "$HIFIGAN" ]] && { echo "Downloading $HIFIGAN from NGC..."; bash scripts/download_models.sh hifigan-finetuned-fastpitch; }
[[ "$FASTPITCH" == "pretrained_models/fastpitch/nvidia_fastpitch_210824.pt" && ! -f "$FASTPITCH" ]] && { echo "Downloading $FASTPITCH from NGC..."; bash scripts/download_models.sh fastpitch; }
# Synthesis
: ${SPEAKER:=0}
: ${DENOISING:=0.005}
if [ ! -n "$OUTPUT_DIR" ]; then
OUTPUT_DIR="./output/audio_$(basename ${FILELIST} .tsv)"
[ "$AMP" = true ] && OUTPUT_DIR+="_fp16"
[ "$AMP" = false ] && OUTPUT_DIR+="_fp32"
[ -n "$FASTPITCH" ] && OUTPUT_DIR+="_fastpitch"
[ ! -n "$FASTPITCH" ] && OUTPUT_DIR+="_gt-mel"
[ -n "$WAVEGLOW" ] && OUTPUT_DIR+="_waveglow"
[ -n "$HIFIGAN" ] && OUTPUT_DIR+="_hifigan"
OUTPUT_DIR+="_denoise-"${DENOISING}
fi
: ${LOG_FILE:="$OUTPUT_DIR/nvlog_infer.json"}
mkdir -p "$OUTPUT_DIR"
echo -e "\nAMP=$AMP, batch_size=$BATCH_SIZE\n"
ARGS+=" --dataset-path $DATASET_DIR"
ARGS+=" -i $FILELIST"
ARGS+=" -o $OUTPUT_DIR"
ARGS+=" --log-file $LOG_FILE"
ARGS+=" --batch-size $BATCH_SIZE"
ARGS+=" --denoising-strength $DENOISING"
ARGS+=" --warmup-steps $WARMUP"
ARGS+=" --repeats $REPEATS"
ARGS+=" --speaker $SPEAKER"
[ "$AMP" = true ] && ARGS+=" --amp"
[ "$CUDA" = true ] && ARGS+=" --cuda"
[ "$CUDNN_BENCHMARK" = true ] && ARGS+=" --cudnn-benchmark"
[ "$TORCHSCRIPT" = true ] && ARGS+=" --torchscript"
[ -n "$HIFIGAN" ] && ARGS+=" --hifigan $HIFIGAN"
[ -n "$WAVEGLOW" ] && ARGS+=" --waveglow $WAVEGLOW"
[ -n "$FASTPITCH" ] && ARGS+=" --fastpitch $FASTPITCH"
[ "$PHONE" = true ] && ARGS+=" --p-arpabet 1.0"
python inference.py $ARGS "$@"
|
TensorFlow2/LanguageModeling/BERT/official/utils/logs | logs | logger | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging utilities for benchmark.
For collecting local environment metrics like CPU and memory, certain python
packages need to be installed. See README for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import datetime
import json
import multiprocessing
import numbers
import os
import threading
import uuid
from six.moves import _thread as thread
from absl import flags
import tensorflow as tf
from tensorflow.python.client import device_lib
from official.utils.logs import cloud_lib
METRIC_LOG_FILE_NAME = "metric.log"
BENCHMARK_RUN_LOG_FILE_NAME = "benchmark_run.log"
_DATE_TIME_FORMAT_PATTERN = "%Y-%m-%dT%H:%M:%S.%fZ"
GCP_TEST_ENV = "GCP"
RUN_STATUS_SUCCESS = "success"
RUN_STATUS_FAILURE = "failure"
RUN_STATUS_RUNNING = "running"
FLAGS = flags.FLAGS
# Don't use it directly. Use get_benchmark_logger to access a logger.
_benchmark_logger = None
_logger_lock = threading.Lock()
def config_benchmark_logger(flag_obj=None):
"""Config the global benchmark logger."""
_logger_lock.acquire()
try:
global _benchmark_logger
if not flag_obj:
flag_obj = FLAGS
if (not hasattr(flag_obj, "benchmark_logger_type") or
flag_obj.benchmark_logger_type == "BaseBenchmarkLogger"):
_benchmark_logger = BaseBenchmarkLogger()
elif flag_obj.benchmark_logger_type == "BenchmarkFileLogger":
_benchmark_logger = BenchmarkFileLogger(flag_obj.benchmark_log_dir)
elif flag_obj.benchmark_logger_type == "BenchmarkBigQueryLogger":
from official.benchmark import benchmark_uploader as bu # pylint: disable=g-import-not-at-top
bq_uploader = bu.BigQueryUploader(gcp_project=flag_obj.gcp_project)
_benchmark_logger = BenchmarkBigQueryLogger(
bigquery_uploader=bq_uploader,
bigquery_data_set=flag_obj.bigquery_data_set,
bigquery_run_table=flag_obj.bigquery_run_table,
bigquery_run_status_table=flag_obj.bigquery_run_status_table,
bigquery_metric_table=flag_obj.bigquery_metric_table,
run_id=str(uuid.uuid4()))
else:
raise ValueError("Unrecognized benchmark_logger_type: %s"
% flag_obj.benchmark_logger_type)
finally:
_logger_lock.release()
return _benchmark_logger
def get_benchmark_logger():
if not _benchmark_logger:
config_benchmark_logger()
return _benchmark_logger
@contextlib.contextmanager
def benchmark_context(flag_obj):
"""Context of benchmark, which will update status of the run accordingly."""
benchmark_logger = config_benchmark_logger(flag_obj)
try:
yield
benchmark_logger.on_finish(RUN_STATUS_SUCCESS)
except Exception: # pylint: disable=broad-except
    # Catch all exceptions, update the run status to failure, and re-raise
benchmark_logger.on_finish(RUN_STATUS_FAILURE)
raise
class BaseBenchmarkLogger(object):
"""Class to log the benchmark information to STDOUT."""
def log_evaluation_result(self, eval_results):
"""Log the evaluation result.
The evaluate result is a dictionary that contains metrics defined in
    model_fn. It also contains an entry for global_step, which contains the value
of the global step when evaluation was performed.
Args:
eval_results: dict, the result of evaluate.
"""
if not isinstance(eval_results, dict):
tf.compat.v1.logging.warning(
"eval_results should be dictionary for logging. Got %s",
type(eval_results))
return
global_step = eval_results[tf.compat.v1.GraphKeys.GLOBAL_STEP]
for key in sorted(eval_results):
if key != tf.compat.v1.GraphKeys.GLOBAL_STEP:
self.log_metric(key, eval_results[key], global_step=global_step)
def log_metric(self, name, value, unit=None, global_step=None, extras=None):
"""Log the benchmark metric information to local file.
Currently the logging is done in a synchronized way. This should be updated
to log asynchronously.
Args:
name: string, the name of the metric to log.
value: number, the value of the metric. The value will not be logged if it
is not a number type.
unit: string, the unit of the metric, E.g "image per second".
global_step: int, the global_step when the metric is logged.
extras: map of string:string, the extra information about the metric.
"""
metric = _process_metric_to_json(name, value, unit, global_step, extras)
if metric:
tf.compat.v1.logging.info("Benchmark metric: %s", metric)
def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
tf.compat.v1.logging.info(
"Benchmark run: %s", _gather_run_info(model_name, dataset_name,
run_params, test_id))
def on_finish(self, status):
pass
class BenchmarkFileLogger(BaseBenchmarkLogger):
"""Class to log the benchmark information to local disk."""
def __init__(self, logging_dir):
super(BenchmarkFileLogger, self).__init__()
self._logging_dir = logging_dir
if not tf.io.gfile.isdir(self._logging_dir):
tf.io.gfile.makedirs(self._logging_dir)
self._metric_file_handler = tf.io.gfile.GFile(
os.path.join(self._logging_dir, METRIC_LOG_FILE_NAME), "a")
def log_metric(self, name, value, unit=None, global_step=None, extras=None):
"""Log the benchmark metric information to local file.
Currently the logging is done in a synchronized way. This should be updated
to log asynchronously.
Args:
name: string, the name of the metric to log.
value: number, the value of the metric. The value will not be logged if it
is not a number type.
unit: string, the unit of the metric, E.g "image per second".
global_step: int, the global_step when the metric is logged.
extras: map of string:string, the extra information about the metric.
"""
metric = _process_metric_to_json(name, value, unit, global_step, extras)
if metric:
try:
json.dump(metric, self._metric_file_handler)
self._metric_file_handler.write("\n")
self._metric_file_handler.flush()
except (TypeError, ValueError) as e:
tf.compat.v1.logging.warning(
"Failed to dump metric to log file: name %s, value %s, error %s",
name, value, e)
def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
"""Collect most of the TF runtime information for the local env.
The schema of the run info follows official/benchmark/datastore/schema.
Args:
model_name: string, the name of the model.
dataset_name: string, the name of dataset for training and evaluation.
run_params: dict, the dictionary of parameters for the run, it could
include hyperparameters or other params that are important for the run.
test_id: string, the unique name of the test run by the combination of key
parameters, eg batch size, num of GPU. It is hardware independent.
"""
run_info = _gather_run_info(model_name, dataset_name, run_params, test_id)
with tf.io.gfile.GFile(os.path.join(
self._logging_dir, BENCHMARK_RUN_LOG_FILE_NAME), "w") as f:
try:
json.dump(run_info, f)
f.write("\n")
except (TypeError, ValueError) as e:
tf.compat.v1.logging.warning(
"Failed to dump benchmark run info to log file: %s", e)
def on_finish(self, status):
self._metric_file_handler.flush()
self._metric_file_handler.close()
class BenchmarkBigQueryLogger(BaseBenchmarkLogger):
"""Class to log the benchmark information to BigQuery data store."""
def __init__(self,
bigquery_uploader,
bigquery_data_set,
bigquery_run_table,
bigquery_run_status_table,
bigquery_metric_table,
run_id):
super(BenchmarkBigQueryLogger, self).__init__()
self._bigquery_uploader = bigquery_uploader
self._bigquery_data_set = bigquery_data_set
self._bigquery_run_table = bigquery_run_table
self._bigquery_run_status_table = bigquery_run_status_table
self._bigquery_metric_table = bigquery_metric_table
self._run_id = run_id
def log_metric(self, name, value, unit=None, global_step=None, extras=None):
"""Log the benchmark metric information to bigquery.
Args:
name: string, the name of the metric to log.
value: number, the value of the metric. The value will not be logged if it
is not a number type.
unit: string, the unit of the metric, E.g "image per second".
global_step: int, the global_step when the metric is logged.
extras: map of string:string, the extra information about the metric.
"""
metric = _process_metric_to_json(name, value, unit, global_step, extras)
if metric:
# Starting new thread for bigquery upload in case it might take long time
# and impact the benchmark and performance measurement. Starting a new
      # thread might have a potential performance impact for models that run on
# CPU.
thread.start_new_thread(
self._bigquery_uploader.upload_benchmark_metric_json,
(self._bigquery_data_set,
self._bigquery_metric_table,
self._run_id,
[metric]))
def log_run_info(self, model_name, dataset_name, run_params, test_id=None):
"""Collect most of the TF runtime information for the local env.
The schema of the run info follows official/benchmark/datastore/schema.
Args:
model_name: string, the name of the model.
dataset_name: string, the name of dataset for training and evaluation.
run_params: dict, the dictionary of parameters for the run, it could
include hyperparameters or other params that are important for the run.
test_id: string, the unique name of the test run by the combination of key
parameters, eg batch size, num of GPU. It is hardware independent.
"""
run_info = _gather_run_info(model_name, dataset_name, run_params, test_id)
# Starting new thread for bigquery upload in case it might take long time
# and impact the benchmark and performance measurement. Starting a new
    # thread might have a potential performance impact for models that run on CPU.
thread.start_new_thread(
self._bigquery_uploader.upload_benchmark_run_json,
(self._bigquery_data_set,
self._bigquery_run_table,
self._run_id,
run_info))
thread.start_new_thread(
self._bigquery_uploader.insert_run_status,
(self._bigquery_data_set,
self._bigquery_run_status_table,
self._run_id,
RUN_STATUS_RUNNING))
def on_finish(self, status):
self._bigquery_uploader.update_run_status(
self._bigquery_data_set,
self._bigquery_run_status_table,
self._run_id,
status)
def _gather_run_info(model_name, dataset_name, run_params, test_id):
"""Collect the benchmark run information for the local environment."""
run_info = {
"model_name": model_name,
"dataset": {"name": dataset_name},
"machine_config": {},
"test_id": test_id,
"run_date": datetime.datetime.utcnow().strftime(
_DATE_TIME_FORMAT_PATTERN)}
_collect_tensorflow_info(run_info)
_collect_tensorflow_environment_variables(run_info)
_collect_run_params(run_info, run_params)
_collect_cpu_info(run_info)
_collect_memory_info(run_info)
_collect_test_environment(run_info)
return run_info
def _process_metric_to_json(
name, value, unit=None, global_step=None, extras=None):
"""Validate the metric data and generate JSON for insert."""
if not isinstance(value, numbers.Number):
tf.compat.v1.logging.warning(
"Metric value to log should be a number. Got %s", type(value))
return None
extras = _convert_to_json_dict(extras)
return {
"name": name,
"value": float(value),
"unit": unit,
"global_step": global_step,
"timestamp": datetime.datetime.utcnow().strftime(
_DATE_TIME_FORMAT_PATTERN),
"extras": extras}
def _collect_tensorflow_info(run_info):
run_info["tensorflow_version"] = {
"version": tf.version.VERSION, "git_hash": tf.version.GIT_VERSION}
def _collect_run_params(run_info, run_params):
"""Log the parameter information for the benchmark run."""
def process_param(name, value):
type_check = {
str: {"name": name, "string_value": value},
int: {"name": name, "long_value": value},
bool: {"name": name, "bool_value": str(value)},
float: {"name": name, "float_value": value},
}
return type_check.get(type(value),
{"name": name, "string_value": str(value)})
if run_params:
run_info["run_parameters"] = [
process_param(k, v) for k, v in sorted(run_params.items())]
def _collect_tensorflow_environment_variables(run_info):
run_info["tensorflow_environment_variables"] = [
{"name": k, "value": v}
for k, v in sorted(os.environ.items()) if k.startswith("TF_")]
# The following code is mirrored from tensorflow/tools/test/system_info_lib
# which is not exposed for import.
def _collect_cpu_info(run_info):
"""Collect the CPU information for the local environment."""
cpu_info = {}
cpu_info["num_cores"] = multiprocessing.cpu_count()
try:
# Note: cpuinfo is not installed in the TensorFlow OSS tree.
# It is installable via pip.
import cpuinfo # pylint: disable=g-import-not-at-top
info = cpuinfo.get_cpu_info()
cpu_info["cpu_info"] = info["brand"]
cpu_info["mhz_per_cpu"] = info["hz_advertised_raw"][0] / 1.0e6
run_info["machine_config"]["cpu_info"] = cpu_info
except ImportError:
tf.compat.v1.logging.warn(
"'cpuinfo' not imported. CPU info will not be logged.")
def _collect_memory_info(run_info):
try:
# Note: psutil is not installed in the TensorFlow OSS tree.
# It is installable via pip.
import psutil # pylint: disable=g-import-not-at-top
vmem = psutil.virtual_memory()
run_info["machine_config"]["memory_total"] = vmem.total
run_info["machine_config"]["memory_available"] = vmem.available
except ImportError:
tf.compat.v1.logging.warn(
"'psutil' not imported. Memory info will not be logged.")
def _collect_test_environment(run_info):
"""Detect the local environment, eg GCE, AWS or DGX, etc."""
if cloud_lib.on_gcp():
run_info["test_environment"] = GCP_TEST_ENV
# TODO(scottzhu): Add more testing env detection for other platform
def _parse_gpu_model(physical_device_desc):
# Assume all the GPU connected are same model
for kv in physical_device_desc.split(","):
k, _, v = kv.partition(":")
if k.strip() == "name":
return v.strip()
return None
def _convert_to_json_dict(input_dict):
if input_dict:
return [{"name": k, "value": v} for k, v in sorted(input_dict.items())]
else:
return []
|
TensorFlow/Detection/SSD/models/research/object_detection/builders | builders | optimizer_builder_test | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for optimizer_builder."""
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import optimizer_builder
from object_detection.protos import optimizer_pb2
class LearningRateBuilderTest(tf.test.TestCase):
def testBuildConstantLearningRate(self):
learning_rate_text_proto = """
constant_learning_rate {
learning_rate: 0.004
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertTrue(learning_rate.op.name.endswith('learning_rate'))
with self.test_session():
learning_rate_out = learning_rate.eval()
self.assertAlmostEqual(learning_rate_out, 0.004)
def testBuildExponentialDecayLearningRate(self):
learning_rate_text_proto = """
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 99999
decay_factor: 0.85
staircase: false
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertTrue(learning_rate.op.name.endswith('learning_rate'))
self.assertTrue(isinstance(learning_rate, tf.Tensor))
def testBuildManualStepLearningRate(self):
learning_rate_text_proto = """
manual_step_learning_rate {
initial_learning_rate: 0.002
schedule {
step: 100
learning_rate: 0.006
}
schedule {
step: 90000
learning_rate: 0.00006
}
warmup: true
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertTrue(isinstance(learning_rate, tf.Tensor))
def testBuildCosineDecayLearningRate(self):
learning_rate_text_proto = """
cosine_decay_learning_rate {
learning_rate_base: 0.002
total_steps: 20000
warmup_learning_rate: 0.0001
warmup_steps: 1000
hold_base_rate_steps: 20000
}
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
learning_rate = optimizer_builder._create_learning_rate(
learning_rate_proto)
self.assertTrue(isinstance(learning_rate, tf.Tensor))
def testRaiseErrorOnEmptyLearningRate(self):
learning_rate_text_proto = """
"""
learning_rate_proto = optimizer_pb2.LearningRate()
text_format.Merge(learning_rate_text_proto, learning_rate_proto)
with self.assertRaises(ValueError):
optimizer_builder._create_learning_rate(learning_rate_proto)
class OptimizerBuilderTest(tf.test.TestCase):
def testBuildRMSPropOptimizer(self):
optimizer_text_proto = """
rms_prop_optimizer: {
learning_rate: {
exponential_decay_learning_rate {
initial_learning_rate: 0.004
decay_steps: 800720
decay_factor: 0.95
}
}
momentum_optimizer_value: 0.9
decay: 0.9
epsilon: 1.0
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertTrue(isinstance(optimizer, tf.train.RMSPropOptimizer))
def testBuildMomentumOptimizer(self):
optimizer_text_proto = """
momentum_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.001
}
}
momentum_optimizer_value: 0.99
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertTrue(isinstance(optimizer, tf.train.MomentumOptimizer))
def testBuildAdamOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: false
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertTrue(isinstance(optimizer, tf.train.AdamOptimizer))
def testBuildMovingAverageOptimizer(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: True
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertTrue(
isinstance(optimizer, tf.contrib.opt.MovingAverageOptimizer))
def testBuildMovingAverageOptimizerWithNonDefaultDecay(self):
optimizer_text_proto = """
adam_optimizer: {
learning_rate: {
constant_learning_rate {
learning_rate: 0.002
}
}
}
use_moving_average: True
moving_average_decay: 0.2
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
optimizer, _ = optimizer_builder.build(optimizer_proto)
self.assertTrue(
isinstance(optimizer, tf.contrib.opt.MovingAverageOptimizer))
# TODO(rathodv): Find a way to not depend on the private members.
self.assertAlmostEqual(optimizer._ema._decay, 0.2)
def testBuildEmptyOptimizer(self):
optimizer_text_proto = """
"""
optimizer_proto = optimizer_pb2.Optimizer()
text_format.Merge(optimizer_text_proto, optimizer_proto)
with self.assertRaises(ValueError):
optimizer_builder.build(optimizer_proto)
if __name__ == '__main__':
tf.test.main()
|
PyTorch/Segmentation/MaskRCNN | MaskRCNN | download_dataset | DATA_DIR=$1
wget -c http://images.cocodataset.org/zips/train2017.zip
wget -c http://images.cocodataset.org/zips/val2017.zip
wget -c http://images.cocodataset.org/annotations/annotations_trainval2017.zip
if md5sum -c hashes.md5
then
echo "DOWNLOAD PASSED"
# mkdir $DATA_DIR
mv train2017.zip $DATA_DIR
mv val2017.zip $DATA_DIR
mv annotations_trainval2017.zip $DATA_DIR
cd $DATA_DIR
dtrx --one=here annotations_trainval2017.zip
dtrx train2017.zip
dtrx val2017.zip
echo "EXTRACTION COMPLETE"
else
echo "DOWNLOAD FAILED HASHCHECK"
fi
|
TensorFlow2/Detection/Efficientdet/scripts/D0 | D0 | convergence-TF32-8xA100-80G | #!/bin/bash
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bs=104
ep=400
lr=1.1
wu=25
ema=0.999
momentum=0.93
mkdir -p /tmp/convergence-TF32-8xA100-80G
curr_dt=`date +"%Y-%m-%d-%H-%M-%S"`
mpirun -np 8 --allow-run-as-root --bind-to none \
-map-by slot -x LD_LIBRARY_PATH -x PATH \
-mca pml ob1 -mca btl ^openib \
-x CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
python3 train.py \
--training_mode=${training_mode:=train} \
--training_file_pattern=/workspace/coco/train-* \
--val_file_pattern=/workspace/coco/val-* \
--val_json_file=/workspace/coco/annotations/instances_val2017.json \
--model_name=efficientdet-d0 \
--model_dir=/tmp/convergence-TF32-8xA100-80G \
--backbone_init=/workspace/checkpoints/efficientnet-b0-joc \
--batch_size=$bs \
--eval_batch_size=$bs \
--num_epochs=$ep \
--use_xla=True \
--amp=False \
--lr=$lr \
--warmup_epochs=$wu \
--hparams="moving_average_decay=$ema,momentum=$momentum" \
2>&1 | tee /tmp/convergence-TF32-8xA100-80G/train-$curr_dt.log |
TensorFlow/Segmentation/UNet_Medical/utils | utils | model_fn | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import horovod.tensorflow as hvd
import tensorflow as tf
from model.unet import unet_v1
# Class Dice coefficient averaged over batch
def dice_coef(predict, target, axis=1, eps=1e-6):
intersection = tf.reduce_sum(predict * target, axis=axis)
union = tf.reduce_sum(predict * predict + target * target, axis=axis)
dice = (2. * intersection + eps) / (union + eps)
return tf.reduce_mean(dice, axis=0) # average over batch
def regularization_l2loss(weight_decay):
def loss_filter_fn(name):
"""we don't need to compute L2 loss for BN"""
return all([
tensor_name not in name.lower()
for tensor_name in ["batchnorm", "batch_norm", "batch_normalization"]
])
filtered_params = [tf.cast(v, tf.float32) for v in tf.trainable_variables() if loss_filter_fn(v.name)]
if len(filtered_params) != 0:
l2_loss_per_vars = [tf.nn.l2_loss(v) for v in filtered_params]
l2_loss = tf.multiply(tf.add_n(l2_loss_per_vars), weight_decay)
else:
l2_loss = tf.zeros(shape=(), dtype=tf.float32)
return l2_loss
def unet_fn(features, labels, mode, params):
""" Model function for tf.Estimator
Controls how the training is performed by specifying how the
total_loss is computed and applied in the backward pass.
Args:
features (tf.Tensor): Tensor samples
labels (tf.Tensor): Tensor labels
mode (tf.estimator.ModeKeys): Indicates if we train, evaluate or predict
params (dict): Additional parameters supplied to the estimator
Returns:
Appropriate tf.estimator.EstimatorSpec for the current mode
"""
dtype = tf.float32
device = '/gpu:0'
global_step = tf.compat.v1.train.get_global_step()
with tf.device(device):
features = tf.cast(features, dtype)
output_map = unet_v1(features=features, mode=mode)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {'logits': tf.nn.softmax(output_map, axis=-1)}
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
n_classes = output_map.shape[-1].value
flat_logits = tf.reshape(tf.cast(output_map, tf.float32),
[tf.shape(output_map)[0], -1, n_classes])
flat_labels = tf.reshape(labels,
[tf.shape(output_map)[0], -1, n_classes])
crossentropy_loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits_v2(logits=flat_logits,
labels=flat_labels), name='cross_loss_ref')
dice_loss = tf.reduce_mean(1 - dice_coef(tf.keras.activations.softmax(flat_logits, axis=-1),
flat_labels), name='dice_loss_ref')
total_loss = tf.add(crossentropy_loss, dice_loss, name="total_loss_ref")
if mode == tf.estimator.ModeKeys.EVAL:
eval_metric_ops = {"eval_ce_loss": tf.compat.v1.metrics.mean(crossentropy_loss),
"eval_dice_loss": tf.compat.v1.metrics.mean(dice_loss),
"eval_total_loss": tf.compat.v1.metrics.mean(total_loss),
"eval_dice_score": tf.compat.v1.metrics.mean(1.0 - dice_loss)}
return tf.estimator.EstimatorSpec(mode=mode, loss=dice_loss, eval_metric_ops=eval_metric_ops)
opt = tf.compat.v1.train.AdamOptimizer(learning_rate=params.learning_rate)
opt = hvd.DistributedOptimizer(opt, device_dense='/gpu:0')
with tf.control_dependencies(tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)):
deterministic = True
gate_gradients = (
tf.compat.v1.train.Optimizer.GATE_OP
if deterministic
else tf.compat.v1.train.Optimizer.GATE_NONE)
train_op = opt.minimize(total_loss, gate_gradients=gate_gradients, global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op,
eval_metric_ops={})
|
TensorFlow/Translation/GNMT/scripts | scripts | wmt16_en_de | #!/usr/bin/env bash
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
export LANG=C.UTF-8
export LC_ALL=C.UTF-8
OUTPUT_DIR=${1:-"data/wmt16_de_en"}
echo "Writing to ${OUTPUT_DIR}. To change this, set the OUTPUT_DIR environment variable."
OUTPUT_DIR_DATA="${OUTPUT_DIR}/data"
mkdir -p $OUTPUT_DIR_DATA
echo "Downloading Europarl v7. This may take a while..."
curl -o ${OUTPUT_DIR_DATA}/europarl-v7-de-en.tgz \
http://www.statmt.org/europarl/v7/de-en.tgz
echo "Downloading Common Crawl corpus. This may take a while..."
curl -o ${OUTPUT_DIR_DATA}/common-crawl.tgz \
http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz
echo "Downloading News Commentary v11. This may take a while..."
curl -o ${OUTPUT_DIR_DATA}/nc-v11.tgz \
http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz
echo "Downloading dev/test sets"
curl -o ${OUTPUT_DIR_DATA}/dev.tgz \
http://data.statmt.org/wmt16/translation-task/dev.tgz
curl -o ${OUTPUT_DIR_DATA}/test.tgz \
http://data.statmt.org/wmt16/translation-task/test.tgz
# Extract everything
echo "Extracting all files..."
mkdir -p "${OUTPUT_DIR_DATA}/europarl-v7-de-en"
tar -xvzf "${OUTPUT_DIR_DATA}/europarl-v7-de-en.tgz" -C "${OUTPUT_DIR_DATA}/europarl-v7-de-en"
mkdir -p "${OUTPUT_DIR_DATA}/common-crawl"
tar -xvzf "${OUTPUT_DIR_DATA}/common-crawl.tgz" -C "${OUTPUT_DIR_DATA}/common-crawl"
mkdir -p "${OUTPUT_DIR_DATA}/nc-v11"
tar -xvzf "${OUTPUT_DIR_DATA}/nc-v11.tgz" -C "${OUTPUT_DIR_DATA}/nc-v11"
mkdir -p "${OUTPUT_DIR_DATA}/dev"
tar -xvzf "${OUTPUT_DIR_DATA}/dev.tgz" -C "${OUTPUT_DIR_DATA}/dev"
mkdir -p "${OUTPUT_DIR_DATA}/test"
tar -xvzf "${OUTPUT_DIR_DATA}/test.tgz" -C "${OUTPUT_DIR_DATA}/test"
# Concatenate Training data
cat "${OUTPUT_DIR_DATA}/europarl-v7-de-en/europarl-v7.de-en.en" \
"${OUTPUT_DIR_DATA}/common-crawl/commoncrawl.de-en.en" \
"${OUTPUT_DIR_DATA}/nc-v11/training-parallel-nc-v11/news-commentary-v11.de-en.en" \
> "${OUTPUT_DIR}/train.en"
wc -l "${OUTPUT_DIR}/train.en"
cat "${OUTPUT_DIR_DATA}/europarl-v7-de-en/europarl-v7.de-en.de" \
"${OUTPUT_DIR_DATA}/common-crawl/commoncrawl.de-en.de" \
"${OUTPUT_DIR_DATA}/nc-v11/training-parallel-nc-v11/news-commentary-v11.de-en.de" \
> "${OUTPUT_DIR}/train.de"
wc -l "${OUTPUT_DIR}/train.de"
# Clone Moses
if [ ! -d "${OUTPUT_DIR}/mosesdecoder" ]; then
echo "Cloning moses for data processing"
git clone https://github.com/moses-smt/mosesdecoder.git "${OUTPUT_DIR}/mosesdecoder"
cd ${OUTPUT_DIR}/mosesdecoder
git reset --hard 8c5eaa1a122236bbf927bde4ec610906fea599e6
cd -
fi
# Convert SGM files
# Convert newstest2014 data into raw text format
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/dev/dev/newstest2014-deen-src.de.sgm \
> ${OUTPUT_DIR_DATA}/dev/dev/newstest2014.de
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/dev/dev/newstest2014-deen-ref.en.sgm \
> ${OUTPUT_DIR_DATA}/dev/dev/newstest2014.en
# Convert newstest2015 data into raw text format
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/dev/dev/newstest2015-deen-src.de.sgm \
> ${OUTPUT_DIR_DATA}/dev/dev/newstest2015.de
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/dev/dev/newstest2015-deen-ref.en.sgm \
> ${OUTPUT_DIR_DATA}/dev/dev/newstest2015.en
# Convert newstest2016 data into raw text format
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/test/test/newstest2016-deen-src.de.sgm \
> ${OUTPUT_DIR_DATA}/test/test/newstest2016.de
${OUTPUT_DIR}/mosesdecoder/scripts/ems/support/input-from-sgm.perl \
< ${OUTPUT_DIR_DATA}/test/test/newstest2016-deen-ref.en.sgm \
> ${OUTPUT_DIR_DATA}/test/test/newstest2016.en
# Copy dev/test data to output dir
cp ${OUTPUT_DIR_DATA}/dev/dev/newstest20*.de ${OUTPUT_DIR}
cp ${OUTPUT_DIR_DATA}/dev/dev/newstest20*.en ${OUTPUT_DIR}
cp ${OUTPUT_DIR_DATA}/test/test/newstest20*.de ${OUTPUT_DIR}
cp ${OUTPUT_DIR_DATA}/test/test/newstest20*.en ${OUTPUT_DIR}
# Tokenize data
for f in ${OUTPUT_DIR}/*.de; do
echo "Tokenizing $f..."
${OUTPUT_DIR}/mosesdecoder/scripts/tokenizer/tokenizer.perl -q -l de -threads 8 < $f > ${f%.*}.tok.de
done
for f in ${OUTPUT_DIR}/*.en; do
echo "Tokenizing $f..."
${OUTPUT_DIR}/mosesdecoder/scripts/tokenizer/tokenizer.perl -q -l en -threads 8 < $f > ${f%.*}.tok.en
done
# Clean all corpora
for f in ${OUTPUT_DIR}/*.en; do
fbase=${f%.*}
echo "Cleaning ${fbase}..."
${OUTPUT_DIR}/mosesdecoder/scripts/training/clean-corpus-n.perl $fbase de en "${fbase}.clean" 1 80
done
# Create dev dataset
cat "${OUTPUT_DIR}/newstest2015.tok.clean.en" \
"${OUTPUT_DIR}/newstest2016.tok.clean.en" \
> "${OUTPUT_DIR}/newstest_dev.tok.clean.en"
cat "${OUTPUT_DIR}/newstest2015.tok.clean.de" \
"${OUTPUT_DIR}/newstest2016.tok.clean.de" \
> "${OUTPUT_DIR}/newstest_dev.tok.clean.de"
# Filter datasets
python3 scripts/filter_dataset.py \
-f1 ${OUTPUT_DIR}/train.tok.clean.en \
-f2 ${OUTPUT_DIR}/train.tok.clean.de
python3 scripts/filter_dataset.py \
-f1 ${OUTPUT_DIR}/newstest_dev.tok.clean.en \
-f2 ${OUTPUT_DIR}/newstest_dev.tok.clean.de
# Generate Subword Units (BPE)
# Clone Subword NMT
if [ ! -d "${OUTPUT_DIR}/subword-nmt" ]; then
git clone https://github.com/rsennrich/subword-nmt.git "${OUTPUT_DIR}/subword-nmt"
cd ${OUTPUT_DIR}/subword-nmt
git reset --hard 48ba99e657591c329e0003f0c6e32e493fa959ef
cd -
fi
# Learn Shared BPE
for merge_ops in 32000; do
echo "Learning BPE with merge_ops=${merge_ops}. This may take a while..."
cat "${OUTPUT_DIR}/train.tok.clean.de" "${OUTPUT_DIR}/train.tok.clean.en" | \
${OUTPUT_DIR}/subword-nmt/learn_bpe.py -s $merge_ops > "${OUTPUT_DIR}/bpe.${merge_ops}"
echo "Apply BPE with merge_ops=${merge_ops} to tokenized files..."
for lang in en de; do
for f in ${OUTPUT_DIR}/*.tok.${lang} ${OUTPUT_DIR}/*.tok.clean.${lang}; do
outfile="${f%.*}.bpe.${merge_ops}.${lang}"
${OUTPUT_DIR}/subword-nmt/apply_bpe.py -c "${OUTPUT_DIR}/bpe.${merge_ops}" < $f > "${outfile}"
echo ${outfile}
done
done
# Create vocabulary file for BPE
cat "${OUTPUT_DIR}/train.tok.clean.bpe.${merge_ops}.en" "${OUTPUT_DIR}/train.tok.clean.bpe.${merge_ops}.de" | \
${OUTPUT_DIR}/subword-nmt/get_vocab.py | cut -f1 -d ' ' > "${OUTPUT_DIR}/vocab.bpe.${merge_ops}"
done
# Duplicate vocab file with language suffix
cp "${OUTPUT_DIR}/vocab.bpe.32000" "${OUTPUT_DIR}/vocab.bpe.32000.en"
cp "${OUTPUT_DIR}/vocab.bpe.32000" "${OUTPUT_DIR}/vocab.bpe.32000.de"
echo "All done."
|
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/bermuda | bermuda | onnx | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.optimizer
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
# pytype: enable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple([_get_dim(d) for d in shape.dim])
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
# TODO: probably modification of onnx model ios causes error on optimize
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path]) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
|
PaddlePaddle/LanguageModeling/BERT | BERT | README | # BERT for PaddlePaddle
This repository provides a script and recipe to train the BERT model for PaddlePaddle to achieve state-of-the-art accuracy and is tested and maintained by NVIDIA.
## Table Of Contents
- [Model overview](#model-overview)
* [Model architecture](#model-architecture)
* [Default configuration](#default-configuration)
* [Feature support matrix](#feature-support-matrix)
* [Features](#features)
* [Mixed precision training](#mixed-precision-training)
* [Enabling mixed precision](#enabling-mixed-precision)
* [Enabling TF32](#enabling-tf32)
* [Glossary](#glossary)
- [Setup](#setup)
* [Requirements](#requirements)
- [Quick Start Guide](#quick-start-guide)
- [Advanced](#advanced)
* [Scripts and sample code](#scripts-and-sample-code)
* [Parameters](#parameters)
* [Pre-training parameters](#pre-training-parameters)
* [Fine tuning parameters](#fine-tuning-parameters)
* [Multi-node](#multi-node)
* [Command-line options](#command-line-options)
* [Getting the data](#getting-the-data)
* [Dataset guidelines](#dataset-guidelines)
* [Training process](#training-process)
* [Pre-training](#pre-training)
* [Fine-tuning](#fine-tuning)
* [Inference process](#inference-process)
- [Performance](#performance)
* [Benchmarking](#benchmarking)
* [Training performance benchmark](#training-performance-benchmark)
* [Inference performance benchmark](#inference-performance-benchmark)
* [Results](#results)
* [Training accuracy results](#training-accuracy-results)
* [Pre-training loss results: NVIDIA DGX A100 (8x A100 80GB)](#pre-training-loss-results-nvidia-dgx-a100-8x-a100-80gb)
* [Pre-training loss curves](#pre-training-loss-curves)
* [Fine-tuning accuracy results: NVIDIA DGX A100 (8x A100 80GB)](#fine-tuning-accuracy-results-nvidia-dgx-a100-8x-a100-80gb)
* [Training stability test](#training-stability-test)
* [Pre-training stability test](#pre-training-stability-test)
* [Fine-tuning stability test](#fine-tuning-stability-test)
* [Training performance results](#training-performance-results)
* [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
* [Pre-training NVIDIA DGX A100 (8x A100 80GB)](#pre-training-nvidia-dgx-a100-8x-a100-80gb)
* [Pre-training NVIDIA DGX A100 (8x A100 80GB) Multi-node Scaling](#pre-training-nvidia-dgx-a100-8x-a100-80gb-multi-node-scaling)
* [Fine-tuning NVIDIA DGX A100 (8x A100 80GB)](#fine-tuning-nvidia-dgx-a100-8x-a100-80gb)
* [Inference performance results](#inference-performance-results)
* [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-1x-a100-80gb)
* [Fine-tuning inference on NVIDIA DGX A100 (1x A100 80GB)](#fine-tuning-inference-on-nvidia-dgx-a100-1x-a100-80gb)
- [Release notes](#release-notes)
* [Changelog](#changelog)
* [Known issues](#known-issues)
## Model overview
Bidirectional Encoder Representations from Transformers (BERT) is a new method of pre-training language representations that obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. This model is based on the [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) paper.
This repository contains scripts to interactively launch data download, training, benchmarking, and inference routines in a Docker container for pre-training and fine-tuning tasks such as question answering. The major differences between the original implementation of the paper and this version of BERT are as follows:
- Scripts to download the Wikipedia dataset
- Scripts to preprocess downloaded data into inputs and targets for pre-training in a modular fashion
- [LAMB](https://arxiv.org/pdf/1904.00962.pdf) optimizer to support training with larger batches
- Adam optimizer for fine-tuning tasks
- Automatic mixed precision (AMP) training support
Other publicly available implementations of BERT include:
1. [NVIDIA PyTorch](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/BERT)
2. [NVIDIA TensorFlow](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/LanguageModeling/BERT)
3. [Hugging Face](https://github.com/huggingface/pytorch-pretrained-BERT)
4. [codertimo](https://github.com/codertimo/BERT-pytorch)
5. [gluon-nlp](https://github.com/dmlc/gluon-nlp/tree/v0.10.x/scripts/bert)
6. [Google's implementation](https://github.com/google-research/bert)
This model trains with mixed precision using Tensor Cores on the NVIDIA Ampere architecture and provides a push-button solution to pre-training on a corpus of choice. As a result, researchers can get results 4x faster than training without Tensor Cores. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
### Model architecture
The BERT model uses the same architecture as the encoder of the Transformer. Input sequences are projected into an embedding space before being fed into the encoder structure. Additionally, positional and segment encodings are added to the embeddings to preserve positional information. The encoder structure is simply a stack of Transformer blocks, which consist of a multi-head attention layer followed by successive stages of feed-forward networks and layer normalization. The multi-head attention layer accomplishes self-attention on multiple input representations.
An illustration of the architecture taken from the [Transformer paper](https://arxiv.org/pdf/1706.03762.pdf) is shown below.
![BERT](images/model.png)
### Default configuration
The architecture of the BERT model is almost identical to the Transformer model that was first introduced in the [Attention Is All You Need paper](https://arxiv.org/pdf/1706.03762.pdf). The main innovation of BERT lies in the pre-training step, where the model is trained on two unsupervised prediction tasks using a large text corpus. Training on these unsupervised tasks produces a generic language model, which can then be quickly fine-tuned to achieve state-of-the-art performance on language processing tasks such as question answering.
The BERT paper reports the results for two configurations of BERT, each corresponding to a unique model size. This implementation provides the same default configurations, which are described in the table below.
| **Model** | **Hidden layers** | **Hidden unit size** | **Attention heads** | **Feedforward filter size** | **Max sequence length** | **Parameters** |
|:---------:|:-----------------:|:--------------------:|:-------------------:|:---------------------------:|:-----------------------:|:--------------:|
| BERTBASE | 12 encoder | 768 | 12 | 4 x 768 | 512 | 110M |
| BERTLARGE | 24 encoder | 1024 | 16 | 4 x 1024 | 512 | 330M |
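For intuition, the configuration table above maps directly onto a stack of standard Transformer encoder layers. The following is a minimal, hypothetical sketch of a BERTLARGE-sized encoder stack using Paddle's built-in layers; it is illustrative only and is not the repository's `modeling.py` implementation, which also adds the embedding layers, attention masking, and the pre-training heads.
```python
import paddle
import paddle.nn as nn
# Illustrative BERTLARGE-sized encoder stack (not the repository's modeling.py).
hidden_size = 1024            # hidden unit size
num_layers = 24               # encoder layers
num_heads = 16                # attention heads
ffn_size = 4 * hidden_size    # feed-forward filter size
encoder_layer = nn.TransformerEncoderLayer(
    d_model=hidden_size,
    nhead=num_heads,
    dim_feedforward=ffn_size,
    dropout=0.1,
    activation="gelu",
)
encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
# Embedded input of shape [batch, seq_len, hidden] in, contextual representations out.
x = paddle.randn([2, 512, hidden_size])
out = encoder(x)              # shape: [2, 512, 1024]
```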
### Feature support matrix
The following features are supported by this model.
| **Feature** | **BERT** |
|:-----------:|:--------:|
| [Paddle AMP](https://www.paddlepaddle.org.cn/documentation/docs/en/guides/performance_improving/amp_en.html) | Yes |
| [Paddle Fleet](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/distributed/fleet/Fleet_en.html#fleet) | Yes |
| [LAMB](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/optimizer/Lamb_en.html) | Yes |
| [LDDL](https://github.com/NVIDIA/LDDL) | Yes |
| Multi-node | Yes |
#### Features
[Fleet](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/distributed/fleet/Fleet_en.html#fleet) is a unified API for distributed training of PaddlePaddle.
[LAMB](https://arxiv.org/pdf/1904.00962.pdf) stands for Layerwise Adaptive Moments based optimizer, which is a large batch optimization technique that helps accelerate the training of deep neural networks using large minibatches. It allows using a global batch size of 65536 and 32768 on sequence lengths 128 and 512, respectively, compared to a batch size of 256 for [Adam](https://arxiv.org/pdf/1412.6980.pdf). The optimized implementation accumulates 1024 gradient batches in phase 1 and 4096 steps in phase 2 before updating weights once. This results in a 15% training speedup. On multi-node systems, LAMB allows scaling up to 1024 GPUs resulting in training speedups of up to 72x in comparison to Adam. Adam has limitations on the learning rate that can be used since it is applied globally on all parameters, whereas LAMB follows a layerwise learning rate strategy.
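The snippet below is a minimal, hypothetical sketch showing how Paddle's built-in `paddle.optimizer.Lamb` can be constructed; the repository ships its own LAMB implementation in `optimizer.py`, and the hyperparameter values here are placeholders rather than the actual pre-training recipe.
```python
import paddle
model = paddle.nn.Linear(1024, 1024)    # placeholder model
optimizer = paddle.optimizer.Lamb(
    learning_rate=6e-3,                 # large-batch learning rate (illustrative)
    lamb_weight_decay=0.01,             # decoupled weight decay applied per layer
    beta1=0.9,
    beta2=0.999,
    epsilon=1e-6,
    parameters=model.parameters(),
)
```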
[LDDL](https://github.com/NVIDIA/LDDL) is a library that enables scalable data preprocessing and loading. LDDL is used by this PaddlePaddle BERT example.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in NVIDIA Volta, and following with both the NVIDIA Turing and NVIDIA Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in CUDA 8 in the NVIDIA Deep Learning SDK.
For information about:
- How to train using mixed precision in PaddlePaddle, refer to the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Automatic Mixed Precision Training](https://www.paddlepaddle.org.cn/documentation/docs/en/guides/01_paddle2.0_introduction/basic_concept/amp_en.html#automatic-mixed-precision-training) documentation.
- Techniques used for mixed precision training, refer to the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
#### Enabling mixed precision
Mixed precision is enabled in Paddle by using the Automatic Mixed Precision (AMP) while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients.
In PaddlePaddle, loss scaling can be easily applied by passing in arguments to [GradScaler()](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/amp/GradScaler_en.html). The scaling value to be used can be dynamic or fixed.
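For a dynamic-graph program, a minimal sketch of dynamic loss scaling with `GradScaler` could look like the following; it is illustrative only, since the pre-training and fine-tuning scripts in this repository use the static-graph and Fleet APIs shown below.
```python
import paddle
model = paddle.nn.Linear(10, 10)                          # placeholder model
optimizer = paddle.optimizer.AdamW(learning_rate=1e-4,
                                   parameters=model.parameters())
scaler = paddle.amp.GradScaler(init_loss_scaling=2.**15)  # dynamic loss scaling
data = paddle.randn([8, 10])
with paddle.amp.auto_cast():                              # forward pass in mixed precision
    loss = model(data).mean()
scaled = scaler.scale(loss)                               # scale loss to avoid FP16 underflow
scaled.backward()
scaler.minimize(optimizer, scaled)                        # unscale gradients and apply the update
optimizer.clear_grad()
```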
For an in-depth walkthrough on AMP, check out sample usage [here](https://www.paddlepaddle.org.cn/documentation/docs/en/guides/01_paddle2.0_introduction/basic_concept/amp_en.html). Paddle AMP is a PaddlePaddle built-in module that provides functions to construct an AMP workflow. The details can be found in [Automatic Mixed Precision (AMP)](https://www.paddlepaddle.org.cn/documentation/docs/en/guides/01_paddle2.0_introduction/basic_concept/amp_en.html#automatic-mixed-precision-training), which requires minimal network code changes to leverage Tensor Core performance.
Code example to enable mixed precision for static graph:
- Use `paddle.static.amp.decorate` to wrap optimizer
```python
import paddle.static.amp as amp
mp_optimizer = amp.decorate(optimizer=optimizer, init_loss_scaling=8.0)
```
- Minimize `loss` and get `scaled_loss`, which is useful when you need a customized loss.
```python
ops, param_grads = mp_optimizer.minimize(loss)
scaled_loss = mp_optimizer.get_scaled_loss()
```
- For distributed training, it is recommended to use Fleet to enable AMP. Fleet is a unified API for distributed training of PaddlePaddle. For more information, refer to [Fleet](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/distributed/fleet/Fleet_en.html#fleet)
```python
import paddle.distributed.fleet as fleet
strategy = fleet.DistributedStrategy()
strategy.amp = True # by default this is false
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
```
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on NVIDIA Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models that require a high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
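If you need to compare against pure FP32 math when debugging numerics, TF32 can be disabled globally through an NVIDIA runtime environment variable (this is a CUDA library switch, not an option provided by this repository):
```shell
export NVIDIA_TF32_OVERRIDE=0   # force FP32 math on Ampere; unset to restore the default TF32 behavior
```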
### Glossary
**Fine-tuning**
Training an already pre-trained model further on a task-specific dataset for subject-specific refinements, adding task-specific layers on top if required.
**Language Model**
Assigns a probability distribution over a sequence of words. Given a sequence of words, it assigns a probability to the whole sequence.
**Pre-training**
Training a model on vast amounts of data on the same (or a different) task to build a general understanding.
**Transformer**
The paper [Attention Is All You Need](https://arxiv.org/abs/1706.03762) introduces a novel architecture called Transformer that uses an attention mechanism and transforms one sequence into another.
**Phase 1**
Pre-training on samples of sequence length 128 and 20 masked predictions per sequence.
**Phase 2**
Pre-training on samples of sequence length 512 and 80 masked predictions per sequence.
## Setup
The following section lists the requirements you need to meet to start training the BERT model.
### Requirements
This repository contains a Dockerfile that extends the CUDA NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PaddlePaddle 22.12-py3 NGC container](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/paddlepaddle) or newer
* Supported GPUs:
* [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, refer to the
following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning
DGX Documentation:
* [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
* [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry)
For those unable to use the PaddlePaddle NGC container, to set up the required environment or create your own container, refer to the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/dgx/support-matrix/index.html).
For multi-node, the sample provided in this repository requires [Enroot](https://github.com/NVIDIA/enroot) and [Pyxis](https://github.com/NVIDIA/pyxis) set up on a [SLURM](https://slurm.schedmd.com) cluster.
More information on how to set up and launch can be found in the [Multi-node Documentation](https://docs.nvidia.com/ngc/multi-node-bert-user-guide).
## Quick Start Guide
To train your model using mixed or TF32 precision with Tensor Cores, perform the following steps using the default parameters of the BERT model. Training configurations to run on 8 x A100 cards and examples of usage are provided at the end of this section. For the specifics concerning training and inference, refer to the [Advanced](#advanced) section.
1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples.git
cd DeepLearningExamples/PaddlePaddle/LanguageModeling/BERT
```
2. Download the NVIDIA pre-trained checkpoint.
If you want to use a pre-trained checkpoint, visit [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/bert_large_paddle_ckpt_mode-pretrain/files). This pre-trained checkpoint is used to fine-tune on SQuAD. Ensure you unzip the downloaded file and place the checkpoint in the `checkpoints/` folder. For a checkpoint already fine-tuned for QA on SQuAD v1.1, visit [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/bert_large_paddle_ckpt_mode-qa_ds-squad11/files).
3. Build BERT on top of the NGC container.
```
bash scripts/docker/build.sh
```
4. Start an interactive session in the NGC container to run training/inference.
```
bash scripts/docker/launch.sh
```
By default:
- Checkpoints of pretraining and fine-tuning routines are stored in the `results/` folder.
- Paddle native logs are stored in the `log/` folder.
- DLLogger's outputs are stored in the `results/` folder.
5. Download the dataset.
This repository provides scripts to download, verify, and extract the following datasets:
- [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) (fine-tuning for question answering)
- Wikipedia (pre-training)
To download, verify, extract the datasets, run:
```shell
bash data/create_datasets_from_start.sh
```
Note: For fine-tuning only, downloading the Wikipedia dataset can be skipped by commenting it out.
Note: Ensure the Wikipedia download is complete. If the download fails in LDDL, remove the output directory `data/wikipedia/` and start over.
6. Start pre-training.
To run on a single node 8 x A100 80G cards from within the container, you can use the following script to run pre-training.
```
bash scripts/run_pretraining.sh
```
The default hyperparameters are set to run on 8x A100 80G cards.
To run on multiple nodes, refer to the [Multi-node](#multi-node) section.
7. Start fine-tuning with the SQuAD dataset.
The above pre-trained BERT representations can be fine-tuned with just one additional output layer for a state-of-the-art question answering system. Running the following script launches fine-tuning for question answering with the SQuAD dataset.
```
bash scripts/run_squad.sh /workspace/bert/checkpoints/<pre-trained_checkpoint>
```
8. Start validation/evaluation.
For SQuAD, validation can be performed with the `bash scripts/run_squad.sh /workspace/bert/checkpoints/<pre-trained_checkpoint>`, setting `mode` to `eval` in `scripts/run_squad.sh` as follows:
```
mode=${12:-"eval"}
```
9. Start inference/predictions.
Inference can be performed with the `bash scripts/run_squad.sh /workspace/bert/checkpoints/<pre-trained_checkpoint>`, setting `mode` to `prediction` in `scripts/run_squad.sh` as follows:
```
mode=${12:-"prediction"}
```
Note:
- In both `prediction` and `eval` modes, the inference process is performed and the prediction results are saved to `<OUT_DIR>/bert-large-uncased/squad/predictions.json`, which is set in `scripts/run_squad.sh` as follows:
```
OUT_DIR=${11:-"/results"} # For SQuAD.
```
- In `eval` mode, after the inference process is completed, the script will further evaluate `predictions.json` on the test dataset and output two metrics: the average `exact match` score and the average `F1` score.
- `predictions.json` contains only a dict mapping each question's id to its predicted answer. For example:
```json
{
"56be4db0acb8001400a502ec": "Denver Broncos",
"56be4db0acb8001400a502ed": "Carolina Panthers",
}
```
- All the references (such as contexts, questions, and answers) can be located in the test dataset (`dev-v1.1.json`) by their unique `id`. For example:
```json
{
"answers": [{"answer_start": 177, "text": "Denver Broncos"}, {"answer_start": 177, "text": "Denver Broncos"}, {"answer_start": 177, "text": "Denver Broncos"}],
"question": "Which NFL team represented the AFC at Super Bowl 50?",
"id": "56be4db0acb8001400a502ec"
}
```
This repository contains some predefined configurations to run the pre-training and SQuAD on NVIDIA DGX A100 nodes in `scripts/configs/pretrain_config.sh` and `scripts/configs/squad_config.sh`. For example, to use the default DGX A100 8 gpu config, run:
```
bash scripts/run_pretraining.sh $(source scripts/configs/pretrain_config.sh && dgxa100-80g_8gpu_amp) # For pre-training with mixed precision.
bash scripts/run_pretraining.sh $(source scripts/configs/pretrain_config.sh && dgxa100-80g_8gpu_tf32) # For pre-training with TF32 precision.
bash scripts/run_squad.sh $(source scripts/configs/squad_config.sh && dgxa100-80g_8gpu_amp) # For the SQuAD v1.1 dataset with mixed precision.
bash scripts/run_squad.sh $(source scripts/configs/squad_config.sh && dgxa100-80g_8gpu_tf32) # For the SQuAD v1.1 dataset with TF32 precision.
```
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
Descriptions of the key scripts and folders are provided below.
- `data/` - Contains scripts for downloading and preparing individual datasets and will contain downloaded and processed datasets.
- `scripts/` - Contains shell scripts to launch data download, pre-training, and fine-tuning.
- `run_squad.sh` - Interface for launching question answering fine-tuning with `run_squad.py`.
- `run_pretraining.sh` - Interface for launching BERT pre-training with `run_pretraining.py`.
- `create_pretraining_data.py` - Creates `.hdf5` files from shared text files in the final step of dataset creation.
- `modeling.py` - Implements the BERT pre-training and fine-tuning model architectures with PaddlePaddle.
- `optimizer.py` - Implements the LAMB optimizer with PaddlePaddle.
- `tokenizer.py` - Implements the BERT tokenizer.
- `loss.py` - Implement the loss function for BERT pre-training and fine-tuning.
- `run_squad.py` - Implements fine-tuning training and evaluation for question answering on the [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) dataset.
- `run_pretraining.py` - Implements BERT pre-training.
### Parameters
#### Pre-training parameters
BERT is designed to pre-train deep bidirectional networks for language representations. The following scripts replicate pre-training on Wikipedia from this [paper](https://arxiv.org/pdf/1810.04805.pdf). These scripts are general and can be used for pre-training language representations on any corpus of choice.
The complete list of the available parameters for the `run_pretraining.py` script is :
```
Global:
--input-dir INPUT_DIR
The input data directory. Should be specified by users and contain .hdf5 files for the task. (default: None)
--vocab-file VOCAB_FILE
Vocabulary mapping/file BERT was pretrainined on. (default: None)
--output-dir OUTPUT_DIR
The output directory where the model checkpoints will be written. Should be specified by users. (default: None)
--bert-model {bert-base-uncased,bert-base-cased,bert-large-uncased,bert-large-cased,custom}
Specifies the type of BERT model to use. If it is set as custom, the path to the config file must be given by specifying --config-file (default: bert-large-uncased)
--config-file CONFIG_FILE
The BERT model config. If set to None, `<--bert-model>.json` in folder `bert_configs` will be used. (default: None)
--max-steps MAX_STEPS
Total number of training steps to perform. (default: None)
--log-freq LOG_FREQ Frequency of logging loss. (default: 10)
--num-steps-per-checkpoint NUM_STEPS_PER_CHECKPOINT
Number of update steps until a model checkpoint is saved to disk. (default: 100)
--from-pretrained-params FROM_PRETRAINED_PARAMS
Path to pretrained parameters. If set to None, no pretrained parameter will be used. (default: None)
--from-checkpoint FROM_CHECKPOINT
A checkpoint path to resume training. If set to None, no checkpoint will be used. If not None, --from-pretrained-params will be ignored. (default: None)
--last-step-of-checkpoint LAST_STEP_OF_CHECKPOINT
The step id of the checkpoint given by --from-checkpoint. It should be None, auto, or integer > 0. If it is set as None, then training will start from the 1-th epoch. If it is set as auto,
then it will search the largest integer-convertible folder --from-checkpoint, which contains the required checkpoint. (default: None)
--from-phase1-final-params FROM_PHASE1_FINAL_PARAMS
Path to final checkpoint of phase1, which will be used to initialize the parameter in the first step of phase2, and ignored in the rest steps of phase2. (default: None)
--seed SEED Random seed. (default: 42)
--report-file REPORT_FILE
A file in which to store JSON experiment reports. (default: ./report.json)
--model-prefix MODEL_PREFIX
The prefix name of model files to save/load. (default: bert_paddle)
--show-config SHOW_CONFIG
To show arguments. (default: True)
--enable-cpu-affinity ENABLE_CPU_AFFINITY
To enable in-built GPU-CPU affinity. (default: True)
--benchmark To enable benchmark mode. (default: False)
--benchmark-steps BENCHMARK_STEPS
Steps for a benchmark run, only applied when --benchmark is set. (default: 20)
--benchmark-warmup-steps BENCHMARK_WARMUP_STEPS
Warmup steps for a benchmark run, only applied when --benchmark is set. (default: 20)
Training:
--optimizer OPTIMIZER
The name of optimizer. It should be one of {Lamb, AdamW}. (default: Lamb)
--gradient-merge-steps GRADIENT_MERGE_STEPS
Number of update steps to accumulate before performing a backward/update pass. (default: 1)
--learning-rate LEARNING_RATE
The initial learning rate. (default: 0.0001)
--warmup-start-lr WARMUP_START_LR
The initial learning rate for warm up. (default: 0.0)
--warmup-proportion WARMUP_PROPORTION
Proportion of training to perform linear learning rate warmup for. For example, 0.1 = 10% of training. (default: 0.01)
--beta1 BETA1 The exponential decay rate for the 1st moment estimates. (default: 0.9)
  --beta2 BETA2 The exponential decay rate for the 2nd moment estimates. (default: 0.999)
--epsilon EPSILON A small float value for numerical stability. (default: 1e-06)
--weight-decay WEIGHT_DECAY
The weight decay coefficient. (default: 0.01)
--max-seq-length MAX_SEQ_LENGTH
The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded. (default: 512)
--batch-size BATCH_SIZE
The batch size for training (default: 32)
--phase1 The phase of BERT pretraining. It should not be set with --phase2 at the same time. (default: False)
--phase2 The phase of BERT pretraining. It should not be set with --phase1 at the same time. (default: False)
--max-predictions-per-seq MAX_PREDICTIONS_PER_SEQ
The maximum total of masked tokens in the input sequence (default: 80)
Advanced Training:
--amp Enable automatic mixed precision training (AMP). (default: False)
--scale-loss SCALE_LOSS
The loss scalar for AMP training, only applied when --amp is set. (default: 1.0)
--use-dynamic-loss-scaling
Enable dynamic loss scaling in AMP training, only applied when --amp is set. (default: False)
--use-pure-fp16 Enable pure FP16 training, only applied when --amp is set. (default: False)
--fuse-mha Enable multihead attention fusion. Require cudnn version >= 8.9.1.
```
#### Fine tuning parameters
* SQuAD
Default arguments are listed below in the order `scripts/run_squad.sh` expects:
- Initial checkpoint - The default is `checkpoints/squad`.
- Number of training Epochs - The default is `2`.
- Batch size - The default is `32`.
- Learning rate - The default is `4.6e-5`.
- Warmup proportion - The default is `0.2`.
- Precision (either `amp` or `fp32`) - The default is `amp`.
- Number of GPUs - The default is `8`.
- Seed - The default is `1`.
- SQuAD directory - The default is `data/download/squad/v1.1`.
- Vocabulary file (token to ID mapping) - The default is `vocab/bert-large-uncased-vocab.txt`.
- Output directory for results - The default is `/results`.
- Mode (`train`, `eval`, `prediction`, `train_eval`) - The default is `train_eval`.
- Config file for the BERT model (It should be the same as the pre-trained model) - The default is `None`, which means `bert_configs/<--bert-model>.json` will be used.
- Max steps - The default is -1.
- Enable benchmark - The default is `false`.
- Benchmark steps - The default is `100`.
- Benchmark warmup steps - The default is `100`.
- Enable MHA fusion - The default is `true`.
The script saves the final checkpoint to the `/results/bert-large-uncased/squad` folder.
Note:
- For SQuAD fine-tuning, `<--max-steps>` is not required since the model is usually trained for two or three epochs. If `<--max-steps>` is not set or is set to -1, the model is trained for `<--epochs>` epochs. If `<--max-steps>` is set to a positive number, the total number of training steps is calculated as: `total_steps = min(max_steps, epochs * steps_per_epoch)`.
- For pre-training, `<--max-steps>` is required and `<--epochs>` is deprecated, because pre-training typically runs for a specified number of steps rather than epochs.
#### Multi-node
Multi-node runs can be launched on a pyxis/enroot Slurm cluster (refer to [Requirements](#requirements)) with the `run.sub` script with the following command for a 4-node DGX-A100 example for both phase 1 and phase 2:
```
TRAIN_BATCH_SIZE=256 GRADIENT_ACCUMULATION_STEPS=8 PHASE=1 sbatch -N4 run.sub
TRAIN_BATCH_SIZE=32 GRADIENT_ACCUMULATION_STEPS=32 PHASE=2 sbatch -N4 run.sub
```
Checkpoints after phase 1 will be saved in `checkpointdir` specified in `run.sub`. The checkpoint will be automatically picked up to resume training on phase 2. Note that phase 2 should be run after phase 1.
The variables `TRAIN_BATCH_SIZE`, `GRADIENT_ACCUMULATION_STEPS`, and `PHASE` correspond to the Python arguments `--batch-size`, `--gradient-merge-steps`, and `--phase1`/`--phase2`, respectively.
Note that the `run.sub` script is a starting point that has to be adapted depending on the environment. In particular, variables such as `datadir` handle the location of the files for each phase.
Refer to the file’s contents to find the full list of variables to adjust for your system.
### Command-line options
To view the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example:
`python run_pretraining.py --help`
`python run_squad.py --help`
Detailed descriptions of command-line options can be found in the [Parameters](#parameters) section.
### Getting the data
For pre-training BERT, we use the Wikipedia (2500M words) dataset. We extract
only the text passages and ignore headers, lists, and tables. BERT requires that
datasets are structured as a document level corpus rather than a shuffled
sentence-level corpus because it is critical to extract long contiguous
sentences. `data/create_datasets_from_start.sh` uses the LDDL downloader to
download the Wikipedia dataset, and `scripts/run_pretraining.sh` uses the LDDL
preprocessor and load balancer to preprocess the Wikipedia dataset into Parquet
shards which are then streamed during the pre-training by the LDDL data loader.
Refer to [LDDL's README](https://github.com/NVIDIA/LDDL/blob/main/README.md) for more
information on how to use LDDL. Depending on the speed of your internet
connection, downloading and extracting the Wikipedia dataset takes a few hours,
and running the LDDL preprocessor and load balancer takes half an hour on a
single DGXA100 node.
For fine-tuning a pre-trained BERT model for specific tasks, by default, this repository prepares the following dataset:
- [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/): for question answering
#### Dataset guidelines
The procedure to prepare a text corpus for pre-training is described in the above section. This section provides additional insight into how exactly raw text is processed so that it is ready for pre-training.
First, raw text is tokenized using [WordPiece tokenization](https://arxiv.org/pdf/1609.08144.pdf). A [CLS] token is inserted at the start of every sequence, and the two sentences in the sequence are separated by a [SEP] token.
Note: BERT pre-training looks at pairs of sentences at a time. A sentence embedding token [A] is added to the first sentence and token [B] to the next.
BERT pre-training optimizes for two unsupervised classification tasks. The first is Masked Language Modeling (Masked LM). One training instance of Masked LM is a single modified sentence. Each token in the sentence has a 15% chance of being selected for masking. A selected token is replaced with [MASK] 80% of the time, with a random token 10% of the time, and is kept unchanged the remaining 10% of the time. The task is then to predict the original token.
The second task is next sentence prediction. One training instance of BERT pre-training is two sentences (a sentence pair). A sentence pair may be constructed by simply taking two adjacent sentences from a single document or by pairing up two random sentences with equal probability. The goal of this task is to predict whether or not the second sentence followed the first in the original document.
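To make the two tasks concrete, the minimal sketch below builds one toy pre-training instance. It only illustrates the 15%/80%/10%/10% masking scheme and the sentence-pair layout described above; the toy vocabulary and helper name are hypothetical, and this is not the repository's actual preprocessing, which is handled by LDDL.

```python
import random

VOCAB = ["apple", "river", "music", "cloud"]  # toy vocabulary, purely illustrative

def make_instance(sent_a, sent_b, is_next, mask_prob=0.15):
    """Build one toy pre-training instance: [CLS] A [SEP] B [SEP] plus Masked LM labels."""
    tokens = ["[CLS]"] + list(sent_a) + ["[SEP]"] + list(sent_b) + ["[SEP]"]
    mlm_labels = {}
    for i, tok in enumerate(tokens):
        if tok in ("[CLS]", "[SEP]") or random.random() >= mask_prob:
            continue
        mlm_labels[i] = tok                   # target: the original token
        r = random.random()
        if r < 0.8:
            tokens[i] = "[MASK]"              # 80%: replace with [MASK]
        elif r < 0.9:
            tokens[i] = random.choice(VOCAB)  # 10%: replace with a random token
        # remaining 10%: keep the original token unchanged
    return tokens, mlm_labels, is_next        # is_next is the next-sentence label

print(make_instance("the cat sat".split(), "on the mat".split(), is_next=True))
```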
### Training process
The training process consists of two steps: pre-training and fine-tuning.
#### Pre-training
Pre-training is performed using the `run_pretraining.py` script along with parameters defined in the `scripts/run_pretraining.sh`.
The `run_pretraining.sh` script runs a job on a single node that trains the BERT-large model from scratch on the Wikipedia dataset using the LAMB optimizer. By default, the training script runs two phases of training with a hyperparameter recipe specific to 8x A100 80GB cards:
Phase 1: (Maximum sequence length of 128)
- Runs on 8 GPUs with a training batch size of 256 per GPU.
- Uses a learning rate of 6e-3.
- Has AMP enabled.
- Runs for 7038 steps, where the first 28.43% (2000) are warm-up steps
- Saves a checkpoint every 200 iterations (keeps only the latest three checkpoints) and at the end of training. All checkpoints and training logs are saved to the `results/` directory (in the container, which can be mounted to a local directory).
- Creates a log file containing all the output
Phase 2: (Maximum sequence length of 512)
- Runs on 8 GPUs with training batch size of 32 per GPU
- Uses a learning rate of 4e-3
- Has AMP enabled
- Runs for 1563 steps, where the first 12.8% are warm-up steps
- Saves a checkpoint every 200 iterations (keeps only the latest 3 checkpoints) and at the end of training. All checkpoints and training logs are saved to the `results/` directory (in the container, which can be mounted to a local directory).
- Creates a log file containing all the output
The default hyperparameters in `run_pretraining.sh` will train the BERT-large model using FP16 arithmetic on the Wikipedia dataset to state-of-the-art accuracy on a DGX A100 with 8x 80GB A100 cards. 90% of the training steps are done with sequence length 128 (phase 1 of training) and 10% of the training steps are done with sequence length 512 (phase 2 of training).
```shell
bash run_pretraining.sh \
<train_batch_size> \
<learning_rate> \
<precision> \
<num_gpus> \
<warmup_proportion> \
<train_steps> \
<save_checkpoint_steps> \
<create_logfile> \
<gradient_accumulation_steps> \
<seed> \
<job_name> \
<train_batch_size_phase2> \
<learning_rate_phase2> \
<warmup_proportion_phase2> \
<train_steps_phase2> \
<gradient_accumulation_steps_phase2> \
<dataset_dir_phase1> \
<dataset_dir_phase2> \
<code_dir> \
<init_checkpoint_dir> \
<wikipedia_source> \
<num_dask_workers> \
<num_shards_per_workers> \
<num_workers> \
<sample_ratio> \
<phase2_bin_size> \
<masking> \
<bert_config_file> \
<enable_benchmark> \
<benchmark_steps> \
<benchmark_warmup_steps> \
<fuse_mha>
```
Where:
- `<train_batch_size>` is the per-GPU batch size used for training. Larger batch sizes run more efficiently but require more memory.
- `<learning_rate>` is the base learning rate for training
- `<precision>` is the type of math in your model, which can be either `amp` or `fp32`. The options mean:
- AMP: Mixed precision 16 and 32-bit floats.
- FP32: 32-bit IEEE single precision floats.
- `<num_gpus>` is the number of GPUs to use for training. Must be equal to or smaller than the number of GPUs attached to your node.
- `<warmup_proportion>` is the percentage of training steps used for warm-up at the start of training.
- `<train_steps>` is the total number of training steps.
- `<save_checkpoint_steps>` controls how often checkpoints are saved.
- `<create_logfile>` is a flag indicating whether output should be written to a log file (acceptable values are `true` or `false`; `true` indicates that output should be saved to a log file).
- `<gradient_accumulation_steps>` an integer indicating the number of steps to accumulate gradients over.
- `<seed>` random seed for the run.
- `<train_batch_size_phase2>` is the per-GPU batch size used for training in phase 2. Larger batch sizes run more efficiently, but require more memory.
- `<learning_rate_phase2>` is the base learning rate for training phase 2.
- `<warmup_proportion_phase2>` is the percentage of training steps used for warm-up at the start of training.
- `<train_steps_phase2>` is the total number of training steps for phase 2, to be continued in addition to phase 1.
- `<gradient_accumulation_steps_phase2>` is an integer indicating the number of steps to accumulate gradients in phase 2.
- `<dataset_dir_phase1>` is the path to the dataset for phase 1. It should be a path to the folder containing `.hdf` files.
- `<dataset_dir_phase2>` is the path to the dataset for phase 2. It should be a path to the folder containing `.hdf` files.
- `<code_dir>` is the root path to bert code.
- `<init_checkpoint_dir>` is the path to the checkpoint to start the pretraining routine on (Usually a BERT pre-trained checkpoint).
- `<wikipedia_source>` is the path to the 'source' subdirectory for the Wikipedia corpus.
- `<num_dask_workers>` is the number of dask workers used to preprocess the BERT dataset.
- `<num_shards_per_workers>` is the number of output parquet/txt shards per worker.
- `<num_workers>` is the number of workers for dataloading.
- `<sample_ratio>` is the ratio of articles/documents sampled from each corpus.
- `<phase2_bin_size>` is the bin size (the stride of the sequence length for each bin) used for phase 2.
- `<masking>` selects the masking mode; LDDL supports both static and dynamic masking. Refer to [LDDL's README](https://github.com/NVIDIA/LDDL/blob/main/README.md) for more information.
- `<bert_config_file>` is the path to the bert config file.
- `<enable_benchmark>` a flag to enable benchmark. The train process will warmup for `<benchmark_warmup_steps>` and then measure the throughput of the following `<benchmark_steps>`.
- `<fuse_mha>` a flag to enable cuDNN MHA fusion.
Note that:
- If users follow the [Quick Start Guide](#quick-start-guide) to set up the container and dataset, there is no need to set any parameters. For example:
```shell
bash scripts/run_pretraining.sh
```
- It's common for users to mount their own datasets or customize the hyperparameters. If so, it's better to specify all parameters in `scripts/run_pretraining.sh` manually and ensure that all paths are correct. For example:
```shell
bash scripts/run_pretraining.sh \
256 6e-3 amp 8 0.2843 7038 200 false 32 0 bert_pretraining \
32 4e-3 0.128 1563 128 \
/path/to/dataset/phase1 \
/path/to/dataset/phase2 \
/workspace/bert \
None \
/path/to/wikipedia/source \
32 128 4 0.9 64 static \
None false
```
To run the pre-training routine on an initial checkpoint, point the `from-checkpoint` variable to the location of the checkpoint folder in `scripts/run_pretraining.sh`.
Both `scripts/run_pretraining.sh` and `scripts/run_squad.sh` rely on positional arguments, which means that if you want to change just one variable from its default value, you need to explicitly specify the values of all variables before it in the argument list, which can be error-prone. Thus, it's recommended to modify the predefined configurations, or create your own, in the `scripts/configs/` folder and run scripts like: `bash scripts/run_pretraining.sh $(source scripts/configs/<user_defined_config>.sh && <config_name>)`.
If you want to move away from bash scripts and launch the Python scripts directly, it's recommended to run `run_pretraining.py` and `run_squad.py` with named arguments. For example:
```shell
python3 -m paddle.distributed.launch \
--gpus="0,1,2,3,4,5,6,7" \
./run_pretraining.py \
--input-dir=/path/to/dataset/phase1 \
--vocab-file=vocab/bert-large-uncased-vocab.txt \
--output-dir=./results \
--bert-model=bert-large-uncased \
--from-checkpoint=./results/bert-large-uncased/phase1 \
--last-step-of-checkpoint=auto \
--batch-size=256 \
--max-steps=7038 \
--num-steps-per-checkpoint=200 \
--log-freq=1 \
--max-seq-length=128 \
--max-predictions-per-seq=20 \
--gradient-merge-steps=32 \
--amp \
--fuse-mha \
--use-dynamic-loss-scaling \
--optimizer=Lamb \
--phase1 \
--scale-loss=1048576 \
--learning-rate=6e-3 \
--warmup-proportion=0.2843 \
--report-file=./results/dllogger_p1.json
```
We provide examples in `scripts/run_pretraining_p1.sh` for phase 1 pre-training and `scripts/run_pretraining_p2.sh` for phase 2 pre-training. For more information about each named argument, run `python run_pretraining.py -h` or `python run_squad.py -h`.
Note that:
- All `<batch_size>` arguments denote the size of a batch computed simultaneously on each GPU. For example, to run phase 1 pretraining on 8xA100, the `<train_batch_size>` is set to 256 and the `<gradient_accumulation_steps>` is set to 32 by default. Each GPU will compute 32 batches before performing a backward/update pass, which means the effective batch size per GPU is `256 * 32 = 8192` and the global batch size is `8192 * #GPUs = 65536`.
- Checkpoints will be stored in `<--output-dir>/<--bert-model>/<--phase>/<step_id>` folders.
For example, to run bert-large-uncased pre-training phase1 for 1000 steps and save checkpoints into `results/bert-large-uncased/phase1` for every 200 steps, set the following args:
```
--output-dir results
--bert-model bert-large-uncased
--phase1
--num-steps-per-checkpoint 200
--max-steps 1000
```
The latest 3 checkpoints will be saved into `results/bert-large-uncased/phase1/600`, `results/bert-large-uncased/phase1/800`, and `results/bert-large-uncased/phase1/1000`.
Each checkpoint includes four files:
- `<--model-prefix>.pdparams`: A file containing all the trainable tensors.
- `<--model-prefix>.pdopts`: A file containing all the tensors used by the optimizer. For the Adam optimizer, it contains beta1, beta2, momentum, and so on. (If the optimizer has no tensors to save, like SGD, no file is generated.)
- `<--model-prefix>.pdmodel`: A file that keeps the description of the program. It's only used for deployment.
- `<--model-prefix>_progress.json`: A record of training progress, including file_id, step_id, phase_id, etc.
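A minimal sketch of inspecting such a checkpoint folder is shown below. It assumes PaddlePaddle's `paddle.load` and the default `bert_paddle` model prefix; the folder path is hypothetical and the sketch is only an illustration, not part of the training scripts.

```python
import json
import os

import paddle  # PaddlePaddle >= 2.0 is assumed

ckpt_dir = "results/bert-large-uncased/phase1/600"  # hypothetical checkpoint folder
prefix = "bert_paddle"                              # default --model-prefix

# Trainable tensors and optimizer tensors are stored as separate files.
params = paddle.load(os.path.join(ckpt_dir, f"{prefix}.pdparams"))
opt_state = paddle.load(os.path.join(ckpt_dir, f"{prefix}.pdopts"))

# The progress file records where training stopped (file_id, step_id, phase_id, ...).
with open(os.path.join(ckpt_dir, f"{prefix}_progress.json")) as f:
    progress = json.load(f)

print(f"{len(params)} parameter tensors, resuming near step {progress.get('step_id')}")
```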
Make sure:
- Resume from checkpoints with `--from-checkpoint`: Both `<--model-prefix>.pdopts` and `<--model-prefix>.pdparams` must be in the given path.
- Start from pre-trained weights with `--from-pretrained-params`: `<--model-prefix>.pdparams` must be in the given path.
- Don't set `--from-checkpoint` and `--from-pretrained-params` at the same time.
The difference between the two is that `--from-pretrained-params` contains only the model weights, while `--from-checkpoint`, apart from the model weights, also contains the optimizer state and LR scheduler state.
`--from-checkpoint` is suitable for dividing the training into parts, for example, in order to divide the training job into shorter stages, or restart training after infrastructure faults.
`--from-pretrained-params` can be used to set the initial parameters of phase 2, or as a base for fine-tuning the model on a different dataset.
Assume the training process in the previous example aborts during the 700th step due to an infrastructure fault, which means the latest checkpoint was saved in `results/bert-large-uncased/phase1/600`. To resume training from that checkpoint, specify `--from-checkpoint` and `--last-step-of-checkpoint` as follows:
- Set `--from-checkpoint` to `results/bert-large-uncased/phase1/600`.
- Set `--last-step-of-checkpoint` to `600`, which can also be omitted because `results/bert-large-uncased/phase1/600/bert_paddle_progress.json` records the last step id.
Then rerun the training to resume from the 601st step through the 1000th step.
We also provide automatic searching for the checkpoint of the last step. It can be enabled by setting `--last-step-of-checkpoint` to `auto`. Note that when automatic searching is enabled, `--from-checkpoint` should be a folder containing either checkpoint files or `<step_id>/<ckpt_files>` subfolders. In the previous example, it should be `results/bert-large-uncased/phase1`.
`--from-phase1-final-params` does the same thing as `--from-pretrained-params`. The difference is that the former only takes effect in the first step of phase 2, while the latter always takes effect as long as it is set.
Assume you want to train bert-large from scratch, but the training process needs to be restarted multiple times due to cluster time limits. To avoid changing `--from-checkpoint` at each run, set the following args:
- Phase1
```
--from-checkpoint results/bert-large-uncased/phase1
--last-step-of-checkpoint auto
```
- Phase2
```
--from-checkpoint results/bert-large-uncased/phase2
--last-step-of-checkpoint auto
--from-phase1-final-params results/bert-large-uncased/phase1/7038
```
At each rerun, the program will automatically find the checkpoint without having to specify it manually.
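The automatic search boils down to picking the largest integer-named subfolder under `--from-checkpoint`. The sketch below is a rough equivalent of that behavior, shown only for illustration; it is not the repository's exact implementation.

```python
import os

def find_last_checkpoint_step(checkpoint_root):
    """Return the largest integer-named subfolder of checkpoint_root, or None if there is none."""
    steps = [
        int(name)
        for name in os.listdir(checkpoint_root)
        if name.isdigit() and os.path.isdir(os.path.join(checkpoint_root, name))
    ]
    return max(steps) if steps else None

# e.g. a folder containing 600/, 800/ and 1000/ resolves to 1000
print(find_last_checkpoint_step("results/bert-large-uncased/phase1"))
```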
#### Fine-tuning
Fine-tuning is provided for a variety of tasks. The following tasks are included with this repository through the following scripts (only support SQuAD for now):
- Question Answering (`scripts/run_squad.sh`)
By default, each Python script implements fine-tuning a pre-trained BERT model for a specified number of training epochs as well as evaluation of the fine-tuned model. Each shell script invokes the associated Python script with the following default parameters:
- Uses 8 GPUs
- Has FP16 precision enabled
- Saves a checkpoint at the end of training to the `./results/checkpoints/squad` folder
Fine-tuning Python scripts implement support for mixed precision and multi-GPU training. For a full list of parameters and associated explanations, refer to the [Parameters](#parameters) section.
The fine-tuning shell scripts have positional arguments outlined below:
```shell
bash scripts/run_squad.sh \
<checkpoint_to_load> \
<epochs> \
<batch_size per GPU> \
<learning rate> \
<warmup_proportion> \
<precision (either `amp` or `fp32`)> \
<number of GPUs to use> \
<seed> \
<SQuAD_DATA_DIR> \
<VOCAB_FILE> \
<OUTPUT_DIR> \
<mode (either `train`, `eval` or `train eval`)> \
<CONFIG_FILE> \
<max_steps> \
<enable_benchmark> \
<benchmark_steps> \
<benchmark_warmup_steps> \
<fuse_mha>
```
By default, the `mode` argument is set to `train eval`. Refer to the [Quick Start Guide](#quick-start-guide) for explanations of each positional argument.
Note: The first positional argument (the path to the checkpoint to load) is required.
The fine-tuning script assumes that the corresponding dataset files exist in the `data/` directory; alternatively, a separate path can be provided as a command-line input to `run_squad.sh`.
### Inference process
Fine-tuning inference can be run to obtain predictions on fine-tuning tasks, for example, Q&A on SQuAD. Evaluation fine-tuning is enabled by the same scripts as training:
- Question Answering (`run_squad.py`)
The `mode` argument of the shell script is used to run in evaluation mode. The fine-tuned BERT model will be run on the evaluation dataset, and the evaluation loss and accuracy will be displayed.
Each inference shell script expects dataset files to exist in the same locations as the corresponding training scripts. The inference scripts can be run with default settings. By setting the `mode` variable in the script to either `eval` or `prediction`, you can choose between running predictions and evaluating them on a given dataset, or only obtaining the model predictions.
`bash scripts/run_squad.sh <path to fine-tuned model checkpoint>`
## Performance
### Benchmarking
The following section shows the steps to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
Training performance benchmarks for pre-training can be obtained by running `scripts/run_pretraining.sh`, and fine-tuning can be obtained by running `scripts/run_squad.sh` for SQuAD, respectively. The required parameters can be passed through the command-line as described in [Training process](#training-process).
To benchmark the training performance on a specific batch size for pre-training, refer to [Pre-training](#pre-training) and turn on the `<benchmark>` flags. An example call to run pretraining for 20 steps (10 steps for warmup and 10 steps to measure, both in phase1 and phase2) and generate throughput numbers:
```shell
bash scripts/run_pretraining.sh \
256 6e-3 amp 8 0.2843 7038 200 false \
32 0 bert_pretraining 32 4e-3 0.128 1563 128 \
/path/to/dataset/phase1 \
/path/to/dataset/phase2 \
/workspace/bert \
None \
/path/to/wikipedia/source \
32 128 4 0.9 64 static \
None true 10 10 true
```
To benchmark the training performance on a specific batch size for SQuAD, refer to [Fine-tuning](#fine-tuning) and turn on the `<benchmark>` flags. An example call to run training for 200 steps (100 steps for warmup and 100 steps to measure), and generate throughput numbers:
```shell
bash scripts/run_squad.sh \
/path/to/pretrained/model \
2 32 4.6e-5 0.2 amp 8 42 \
/path/to/squad/v1.1 \
vocab/bert-large-uncased-vocab.txt \
results/checkpoints \
train \
bert_configs/bert-large-uncased.json \
-1 true 100 100 true
```
#### Inference performance benchmark
Inference performance benchmark for fine-tuning can be obtained by running `scripts/run_squad.sh`. The required parameters can be passed through the command-line as described in [Inference process](#inference-process).
To benchmark the inference performance on a specific batch size for SQuAD, run:
```shell
bash scripts/run_squad.sh \
<pre-trained model path> \
<epochs> <batch size> <learning rate> <warmup_proportion> <amp|fp32> <num_gpus> <seed> \
<path to SQuAD dataset> \
<path to vocab set> \
<results directory> \
eval \
<BERT config path> \
<max steps> <benchmark> <benchmark_steps> <benchmark_warmup_steps> \
<fuse_mha>
```
An example call to run inference and generate throughput numbers:
```
bash scripts/run_squad.sh \
/path/to/pretrained/model \
2 32 4.6e-5 0.2 amp 8 42 \
/path/to/squad/v1.1 \
vocab/bert-large-uncased-vocab.txt \
results/checkpoints \
eval \
bert_configs/bert-large-uncased.json \
-1 true 100 100 true
```
### Results
The following sections provide details on how we achieved our performance and accuracy in training and inference.
#### Training accuracy results
Our results were obtained by running the `scripts/run_squad.sh` and `scripts/run_pretraining.sh` training scripts in the paddle NGC container unless otherwise specified.
##### Pre-training loss results: NVIDIA DGX A100 (8x A100 80GB)
| DGX System | GPUs / Node | Precision | Accumulated Batch size / GPU (Phase 1 and Phase 2) | Accumulation steps (Phase 1 and Phase 2) | Final Loss | Time to train(hours) | Time to train speedup (TF32 to mixed precision) |
|--------------------|-------------|-----------|----------------------------------------------------|------------------------------------------|-------------------|----------------------|-------------------------------------------------|
| 32 x DGX A100 80GB | 8 | AMP | 256 and 128 | 1 and 4 | 1.409 | ~ 1.1 hours | 2.27 |
| 32 x DGX A100 80GB | 8 | TF32 | 128 and 16 | 2 and 8 | 1.421 | ~ 2.5 hours | 1 |
##### Pre-training loss curves
![Pre-training Loss Curves](images/bert_loss.png)
##### Fine-tuning accuracy results: NVIDIA DGX A100 (8x A100 80GB)
* SQuAD
| GPUs | Batch size / GPU (TF32 and FP16) | Accuracy - TF32(% F1) | Accuracy - mixed precision(% F1) | Time to train(hours) - TF32 | Time to train(hours) - mixed precision | Time to train speedup (TF32 to mixed precision) |
|------|----------------------------------|-----------------------|----------------------------------|-----------------------------|----------------------------------------|-------------------------------------------------|
| 8 | 32 | 91.13 | 91.11 | 0.078 | 0.056 | 1.39 |
##### Training stability test
###### Pre-training stability test
| Accuracy Metric | Seed 0 | Seed 1 | Seed 2 | Seed 3 | Seed 4 | Mean | Standard Deviation |
|-----------------|--------|--------|--------|--------|--------|-------|--------------------|
| Final Loss | 1.409 | 1.367 | 1.528 | 1.434 | 1.470 | 1.442 | 0.049 |
###### Fine-tuning stability test
* SQuAD
Training stability with 8 GPUs, FP16 computations, batch size of 32:
| Accuracy Metric | Seed 0 | Seed 1 | Seed 2 | Seed 3 | Seed 4 | Seed 5 | Seed 6 | Seed 7 | Seed 8 | Seed 9 | Mean | Standard Deviation |
|-----------------|--------|--------|--------|--------|--------|--------|--------|--------|--------|--------|-------|--------------------|
| Exact Match % | 84.07 | 84.39 | 83.94 | 83.78 | 83.85 | 84.47 | 84.13 | 84.20 | 84.03 | 83.80 | 84.07 | 0.225 |
| f1 % | 90.86 | 91.00 | 90.82 | 90.56 | 90.76 | 91.11 | 90.77 | 90.90 | 90.65 | 90.54 | 90.80 | 0.173 |
#### Training performance results
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
Our results were obtained by running the script `run_pretraining.sh` in the PaddlePaddle:22.12-py3 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs. Performance numbers (in sequences per second) were averaged over a few training iterations.
###### Pre-training NVIDIA DGX A100 (8x A100 80GB)
| GPUs | Batch size / GPU (TF32 and FP16) | Accumulation steps (TF32 and FP16) | Sequence length | Throughput - TF32(sequences/sec) | Throughput - mixed precision(sequences/sec) | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision |
|------|----------------------------------|------------------------------------|-----------------|----------------------------------|---------------------------------------------|---------------------------------------------|---------------------|--------------------------------|
| 1 | 8192 and 8192 | 64 and 32 | 128 | 307 | 694 | 2.26 | 1.00 | 1.00 |
| 8 | 8192 and 8192 | 64 and 32 | 128 | 2428 | 5541 | 2.28 | 7.91 | 7.98 |
| 1 | 4096 and 4096 | 256 and 128 | 512 | 107 | 264 | 2.47 | 1.00 | 1.00 |
| 8 | 4096 and 4096 | 256 and 128 | 512 | 851 | 2109 | 2.48 | 7.95 | 7.99 |
###### Pre-training NVIDIA DGX A100 (8x A100 80GB) Multi-node Scaling
| Nodes | GPUs / node | Batch size / GPU (TF32 and FP16) | Accumulated Batch size / GPU (TF32 and FP16) | Accumulation steps (TF32 and FP16) | Sequence length | Mixed Precision Throughput | Mixed Precision Strong Scaling | TF32 Throughput | TF32 Strong Scaling | Speedup (Mixed Precision to TF32) |
|-------|-------------|----------------------------------|------------------------------------|-----------------|----------------------------|--------------------------------|-----------------|---------------------|-----------------------------------|-----|
| 1 | 8 | 128 and 256 | 8192 and 8192 | 64 and 32 | 128 | 5541 | 1 | 2428 | 1 | 2.28 |
| 2 | 8 | 128 and 256 | 4096 and 4096 | 32 and 16 | 128 | 10646 | 1.92 | 4638 | 1.91 | 2.29 |
| 4 | 8 | 128 and 256 | 2048 and 2048 | 16 and 8 | 128 | 21389 | 3.86 | 9445 | 3.89 | 2.26 |
| 8 | 8 | 128 and 256 | 1024 and 1024 | 8 and 4 | 128 | 41681 | 7.52 | 18335 | 7.55 | 2.27 |
| 16 | 8 | 128 and 256 | 512 and 512 | 4 and 2 | 128 | 79023 | 14.26 | 35526 | 14.63 | 2.22 |
| 32 | 8 | 128 and 256 | 256 and 256 | 2 and 1 | 128 | 157952 | 28.51 | 69701 | 28.71 | 2.27 |
| 1 | 8 | 16 and 32 | 4096 and 4096 | 256 and 128 | 512 | 2109 | 1 | 851 | 1 | 2.48 |
| 2 | 8 | 16 and 32 | 2048 and 2048 | 128 and 64 | 512 | 4051 | 1.92 | 1601 | 1.88 | 2.53 |
| 4 | 8 | 16 and 32 | 1024 and 1024 | 64 and 32 | 512 | 7972 | 3.78 | 3240 | 3.81 | 2.46 |
| 8 | 8 | 16 and 32 | 512 and 512 | 32 and 16 | 512 | 15760 | 7.47 | 6329 | 7.44 | 2.49 |
| 16 | 8 | 16 and 32 | 256 and 256 | 16 and 8 | 512 | 31129 | 14.76 | 12273 | 14.42 | 2.54 |
| 32 | 8 | 16 and 32 | 128 and 128 | 8 and 4 | 512 | 60206 | 28.55 | 24047 | 28.26 | 2.50 |
###### Fine-tuning NVIDIA DGX A100 (8x A100 80GB)
* SQuAD
| GPUs | Batch size / GPU (TF32 and FP16) | Throughput - TF32(sequences/sec) | Throughput - mixed precision(sequences/sec) | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision |
|------|----------------------------------|----------------------------------|---------------------------------------------|---------------------------------------------|---------------------|--------------------------------|
| 1 | 32 and 32 | 83 | 123 | 1.48 | 1.00 | 1.00 |
| 8 | 32 and 32 | 629 | 929 | 1.48 | 7.59 | 7.55 |
#### Inference performance results
##### Inference performance: NVIDIA DGX A100 (1x A100 80GB)
Our results were obtained by running `scripts/run_squad.sh` in the PaddlePaddle:22.08-py3 NGC container on NVIDIA DGX A100 with (1x A100 80G) GPUs.
###### Fine-tuning inference on NVIDIA DGX A100 (1x A100 80GB)
* SQuAD
| GPUs | Batch Size (TF32/FP16) | Sequence Length | Throughput \- TF32\(sequences/sec\) | Throughput \- Mixed Precision\(sequences/sec\) |
|------|---------------------------|-----------------|-------------------------------------|------------------------------------------------|
| 1 | 32/32 | 384 | 131 | 158 |
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
The inference performance metrics used were items/second.
## Release notes
### Changelog
July 2023
- Optimize AMP training with cuDNN fused dot product attention kernel.
March 2023
- Pre-training using [Language Datasets and Data Loaders (LDDL)](https://github.com/NVIDIA/LDDL)
- Binned pretraining for phase2 with LDDL using a bin size of 64
August 2022
- Pre-training support with LAMB optimizer.
- Updated Data download and Preprocessing.
- Integrate DLLogger.
- Pre-training with AMP, Fleet.
- SQuAD finetune support with AdamW optimizer.
- Updated accuracy and performance tables tested on A100.
- Initial release.
### Known issues
There are no known issues with this model.
|
TensorFlow2/Detection/Efficientdet/dataset | dataset | get_coco | #!/bin/bash
# Download and extract coco 2017
mkdir -p /workspace/coco
cd /workspace/coco
wget http://images.cocodataset.org/zips/train2017.zip
wget http://images.cocodataset.org/zips/val2017.zip
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
unzip train2017.zip
unzip val2017.zip
unzip annotations_trainval2017.zip
# Convert to tfrecord format
cd /workspace/effdet-tf2
python dataset/create_coco_tfrecord.py --image_dir=/workspace/coco/train2017 \
--caption_annotations_file=/workspace/coco/annotations/captions_train2017.json \
--output_file_prefix=/workspace/coco/train --num_shards=256
python dataset/create_coco_tfrecord.py --image_dir=/workspace/coco/val2017 \
--caption_annotations_file=/workspace/coco/annotations/captions_val2017.json \
--output_file_prefix=/workspace/coco/val --num_shards=32
# Clean up
rm /workspace/coco/*.zip
rm -rf /workspace/coco/train2017
rm -rf /workspace/coco/val2017
|
TensorFlow/Translation/GNMT/examples | examples | DGX1_FP32_1GPU | python nmt.py --output_dir=results --batch_size=128 --learning_rate=5e-4
|
PyTorch/SpeechSynthesis/Tacotron2/notebooks/triton | triton | run_this | jupyter lab --allow-root --ip=0.0.0.0 --no-browser notebook.ipynb |
Tools/PyTorch/TimeSeriesPredictionPlatform/inference | inference | inference_triton | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import subprocess
import hydra
from omegaconf import OmegaConf
from triton.dataloader import get_dataloader_fn
from loggers.log_helper import setup_logger
import dllogger
from data.data_utils import Preprocessor
def run_inference_triton(config):
cfg = config
with open(os.path.join(cfg.checkpoint, ".hydra/config_merged.yaml"), "rb") as f:
config = OmegaConf.load(f)
config.evaluator = OmegaConf.merge(config.evaluator, cfg.evaluator)
if cfg.get("dataset_dir", None):
if not os.path.isdir(config.dataset.config.dest_path):
raise ValueError("dataset_dir must be a directory")
config.dataset.config.dest_path = cfg.dataset_dir
config.inference = cfg
with open(os.path.join(cfg.checkpoint, ".hydra/config_merged.yaml"), "wb") as f:
OmegaConf.resolve(config)
OmegaConf.save(config=config, f=f.name)
output_path = os.path.join(cfg.checkpoint, "deployment")
tspp_main_dir = os.path.sep + os.path.join(*(os.getcwd().split(os.path.sep)[:-3]))
# get the actual model name
if not os.path.isdir(os.path.join(output_path, "navigator_workspace")) or not os.path.isdir(
os.path.join(output_path, "navigator_workspace/model-store")
):
if os.path.isdir(os.path.join(output_path, "navigator_workspace/final-model-store")):
shutil.copytree(os.path.join(output_path, "navigator_workspace/final-model-store"), os.path.join(output_path, "navigator_workspace/model-store"))
else:
assert (
False
), "This checkpoint directory is not configured correctly, there should be a dir/deployment/navigator_workspace/model-store/ directory"
files_in_store = list(os.listdir(os.path.join(output_path, "navigator_workspace/model-store")))
if len(files_in_store) < 1:
assert False, "There needs to be exactly 1 model in the model-store directory"
evaluator = hydra.utils.call(config.evaluator)
if config.dataset.config.get('xgb', False):
if cfg.get("dataset_path", None):
preprocessor = Preprocessor(config.dataset.config)
if cfg.get("preproc_state_path", None):
preprocessor_state_file = cfg.preproc_state_path
else:
preprocessor_state_file = None
preprocessor.load_state(preprocessor_state_file)
test_df = preprocessor.preprocess_test(dataset=cfg.dataset_path)
test_df = preprocessor.apply_scalers(test_df)
test_df = preprocessor.impute(test_df)
train, valid, test = hydra.utils.call(config.dataset, input_df=test_df)
else:
train, valid, test = hydra.utils.call(config.dataset)
del train, valid
preds_full, labels_full, ids_full, weights_full = evaluator.predict_xgboost(test, max_batch_size=cfg.batch_size)
elif config.dataset.config.get('stat', False):
raise ValueError("Stat models not supported on triton")
else:
model_name = cfg.get("model_name") if cfg.get("model_name", None) else files_in_store[0]
dataloader = get_dataloader_fn(cfg.checkpoint, cfg.batch_size)
preds_full, labels_full, ids_full, weights_full = evaluator.predict(dataloader, model_name)
#Need to merge the eval configs here
metrics = evaluator.evaluate(preds_full, labels_full, ids_full, weights_full)
logger = setup_logger(cfg)
logger.log(step=[], data={k: float(v) for k, v in metrics.items()}, verbosity=dllogger.Verbosity.VERBOSE)
logger.log(step='event', data={"String": "Evaluation Metrics: {}".format(metrics)}, verbosity=dllogger.Verbosity.DEFAULT)
print(metrics) |
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp | trtis_cpp | README | # Tacotron2+WaveGlow Inference Using TensorRT Inference Server with TensorRT
This is a subfolder of the Tacotron2 for PyTorch repository that provides
scripts to deploy high-performance inference using NVIDIA TensorRT Inference
Server with a custom TensorRT
[backend](https://docs.nvidia.com/deeplearning/sdk/tensorrt-inference-server-guide/docs/build.html#building-a-custom-backend).
## Table of contents
* [Model overview](#model-overview)
- [Tacotron2 plugins](#tacotron2-plugins)
* [Setup](#setup)
- [Requirements](#requirements)
* [Quick Start Guide](#quick-start-guide)
- [Export the models](#export-the-models)
- [Setup the Triton server](#setup-the-trtis-server)
- [Setup the Triton client](#setup-the-trtis-client)
- [Starting the Triton server](#starting-the-trtis-server)
- [Running the Triton client](#running-the-trtis-client)
* [Advanced](#advanced)
- [Code structure](#code-structure)
- [Precision](#precision)
* [Performance](#performance)
- [Performance on NVIDIA T4](#performance-on-nvidia-t4)
- [Running the benchmark](#running-the-benchmark)
## Model overview
The Tacotron2 and WaveGlow models form a text-to-speech system that enables
users to synthesize natural sounding speech from raw transcripts without any
additional information such as patterns and/or rhythms of speech.
In this implementation, the Tacotron2 network is split into three sub-networks,
the encoder, decoder, and postnet.
This is followed by WaveGlow as a vocoder, and a Denoiser network using a
[STFT](https://en.wikipedia.org/wiki/Short-time_Fourier_transform)
to remove noise from the audio output.
More information on the Tacotron2 and WaveGlow architectures can be found in
[Tacotron2 PyTorch README](../README.md), as well as information about
training.
### Tacotron2 plugins
Because the layers in Tacotron2's decoder are quite small, many deep learning
frameworks fail to achieve high throughput for a batch size of one, as the
overhead associated with executing each of these small layers can dominate the
runtime.
TensorRT supports custom layers through its
[plugin](https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#pluginv2-layer)
interface, which not only allows custom operations, but also allows
developers to manually tune and/or fuse specific layers in their
networks while still using TensorRT to perform automated optimizations on the
other layers, and to manage and execute the entire network.
This implementation uses several plugins for Tacotron2's decoder,
including fusing layers of the Prenet and Attention, as well as creating LSTM
Cell kernels optimized specifically for the dimensions used in Tacotron2.
## Setup
### Requirements
Building and running the container requires `docker`, `nvidia-docker` and `bash`.
In addition to this, the host machine must have a Volta or Turing based GPU.
## Quick Start Guide
### Clone the repository
```bash
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp
```
### Export the models
You can either train models yourself, or download pretrained checkpoints from [NGC](https://ngc.nvidia.com/catalog/models) and copy them to the `./checkpoints` directory:
- [Tacotron2 checkpoint](https://ngc.nvidia.com/models/nvidia:tacotron2pyt_fp16)
- [WaveGlow checkpoint](https://ngc.nvidia.com/models/nvidia:waveglow256pyt_fp16)
```bash
mkdir checkpoints
cp <Tacotron2_checkpoint> ./checkpoints/
cp <WaveGlow_checkpoint> ./checkpoints/
```
Next you will need to export the PyTorch checkpoints so that they can be used to build TensorRT engines. This can be done via the script `export_weights.sh` script:
```bash
mkdir models
./export_weights.sh checkpoints/tacotron2_1032590_6000_amp checkpoints/waveglow_1076430_14000_amp models/
```
### Setup the Triton server
```bash
./build_trtis.sh models/tacotron2.json models/waveglow.onnx models/denoiser.json
```
This will take some time as TensorRT tries out different tactics for best
performance while building the engines.
### Setup the Triton client
Next you need to build the client docker container. To do this, enter the
`trtis_client` directory and run the script `build_trtis_client.sh`.
```bash
cd trtis_client
./build_trtis_client.sh
cd ..
```
### Run the Triton server
To run the server locally, use the script `run_trtis_server.sh`:
```bash
./run_trtis_server.sh
```
You can use the environment variable `NVIDIA_VISIBLE_DEVICES` to set which GPUs
the Triton server sees.
### Run the Triton client
Leave the server running. In another terminal, type:
```bash
cd trtis_client/
./run_trtis_client.sh phrases.txt
```
This will generate one WAV file per line in the file `phrases.txt`, named after
the line number (e.g., 1.wav through 8.wav for an 8-line file) in the `audio/`
directory. It is important that each line in the file ends with a period, or
Tacotron2 may fail
to detect the end of the phrase.
## Advanced
### Code structure
The `src/` contains the following sub-directories:
* `trtis`: The directory containing code for the custom Triton backend.
* `trt/tacotron2`: The directory containing the Tacotron2 implementation in TensorRT.
* `trt/waveglow`: The directory containing the WaveGlow implementation in TensorRT.
* `trt/denoiser`: The directory containing the Denoiser (STFT) implementation in TensorRT.
* `trt/plugins`: The directory containing plugins used by the TensorRT engines.
The `trtis_client/` directory contains the code for running the client.
### Precision
By default the `./build_trtis.sh` script builds the TensorRT engines with FP16 mode enabled, which allows some operations to be performed in lower precision, in order to increase throughput. To use engines with only FP32 precision, add `0` to `./build_trtis.sh`’s arguments:
```bash
./build_trtis.sh models/tacotron2.json models/waveglow.onnx models/denoiser.json 0
```
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
The following tables show inference statistics for the Tacotron2 and WaveGlow
text-to-speech system.
The tables include average latency, latency standard deviation,
and latency confidence intervals. Throughput is measured as the number of
generated audio samples per second. RTF is the real-time factor which
tells how many seconds of speech are generated in 1 second of processing time.
For all tests in these tables, we used WaveGlow with 256 residual channels.
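As a quick illustration of how these metrics relate, the snippet below derives throughput and RTF from a latency and audio length similar to those reported for TensorRT FP16 at batch size 1; the 22050 Hz output sampling rate is an assumption, and the numbers are only illustrative.

```python
sampling_rate = 22050   # Hz, assumed output rate of the vocoder
latency_s = 0.40        # seconds of processing per utterance (illustrative)
audio_len_s = 6.72      # seconds of audio generated (illustrative)

throughput = audio_len_s * sampling_rate / latency_s  # generated audio samples per second
rtf = audio_len_s / latency_s                          # seconds of speech per second of compute
print(f"throughput ~ {throughput:,.0f} samples/sec, RTF ~ {rtf:.2f}")
```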
### Performance on NVIDIA T4
#### TensorRT \w Plugins vs. PyTorch
Latency in this table is measured from just before the input sequence starts
being copied from host memory to the GPU,
to just after the generated audio finishes being copied back to the host
memory.
That is, what is taking place in the custom backend inside of Triton.
|Framework|Batch size|Input length|Precision|Avg latency (s)|Latency std (s)| Latency interval 90% (s)|Latency interval 95% (s)|Latency interval 99% (s)| Throughput (samples/sec) | Speed-up vs. PyT FP32 | Speed-up vs. PyT FP16 | Avg mels generated |Avg audio length (s)|Avg RTF|
|------:|----:|-----:|-----------:|--------:|------:|------:|------:|------:|------:|------:|----:|------:|-------:|---:|
| TRT \w plugins | 1 | 128 | FP16 | 0.40 | 0.00 | 0.40 | 0.40 | 0.40 | 369,862 | __4.27x__ | __3.90x__ | 579 | 6.72 | 16.77 |
| TRT \w plugins | 1 | 128 | FP32 | 1.20 | 0.01 | 1.21 | 1.21 | 1.21 | 123,922 | __1.43x__ | __1.31x__ | 581 | 6.74 | 5.62 |
| PyTorch | 1 | 128 | FP16 | 1.63 | 0.07 | 1.71 | 1.73 | 1.81 | 94,758 | __1.10x__ | __1.00x__ | 601 | 6.98 | 4.30 |
| PyTorch | 1 | 128 | FP32 | 1.77 | 0.08 | 1.88 | 1.92 | 2.00 | 86,705 | __1.00x__ | __0.91x__ | 600 | 6.96 | 3.92 |
That is a __3.72x__ speedup when using TensorRT FP16 with plugins when compared to
PyTorch FP32, and still a __3.39x__ speedup when compared to PyTorch FP16.
The TensorRT entries in this table can be reproduced by using the output of
the Triton server, when performing the steps for [Running the
benchmark](#running-the-benchmark) below.
The PyTorch entries can be reproduced by following the instructions
[here](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/SpeechSynthesis/Tacotron2).
#### TensorRT \w Plugins in Triton
Latency in this table is measured from the client sending the request, to it
receiving back the generated audio. This includes network time,
request/response formatting time, as well as the backend time shown in the
above section.
|Batch size|Input length|Precision|Avg latency (s)|Latency std (s)| Latency interval 90% (s)|Latency interval 95% (s)|Latency interval 99% (s)|Avg mels generated |Avg audio length (s)|Avg RTF|
|---:|----:|-----:|------:|------:|------:|------:|------:|----:|------:|-------:|
| 1 | 128 | FP16 | 0.42 | 0.00 | 0.42 | 0.42 | 0.42 | 579 | 6.72 | 15.95 |
| 8 | 128 | FP16 | 2.55 | 0.01 | 2.56 | 2.56 | 2.57 | 571 | 6.62 | 2.60 |
| 1 | 128 | FP32 | 1.22 | 0.01 | 1.22 | 1.23 | 1.23 | 581 | 6.75 | 5.54 |
| 8 | 128 | FP32 | 8.64 | 0.01 | 8.68 | 8.69 | 8.71 | 569 | 6.61 | 0.72 |
To reproduce this table, see [Running the benchmark](#running-the-benchmark)
below.
### Running the benchmark
Once you have performed the steps in [Setup the Triton server](#setup-the-trtis-server) and
[Setup the Triton client](#setup-the-trtis-client), you can run the benchmark by starting the Triton server via:
```bash
./run_trtis_server.sh
```
Leave the server running, and in another terminal run the script `trtis_client/run_trtis_benchmark_client.sh`:
```bash
cd trtis_client/
./run_trtis_benchmark_client.sh <batch size>
```
Replace `<batch size>` with the desired batch size between 1 and 32. The engines are built with a maximum batch size of 32 in the `./build_trtis.sh` script.
After some time this should produce output like:
```
Performed 1000 runs.
batch size = 1
avg latency (s) = 0.421375
latency std (s) = 0.00170839
latency interval 50% (s) = 0.421553
latency interval 90% (s) = 0.422805
latency interval 95% (s) = 0.423273
latency interval 99% (s) = 0.424153
average mels generated = 582
average audio generated (s) = 6.72218
average real-time factor = 15.953
```
|
PyTorch/LanguageModeling/BERT/triton/deployment_toolkit | deployment_toolkit | core | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
def __str__(self):
return self.value
class Accelerator(Parameter):
NONE = "none"
AMP = "amp"
TRT = "trt"
CUDA = NONE # backward compatibility
class Precision(Parameter):
INT8 = "int8"
FP16 = "fp16"
FP32 = "fp32"
TF32 = "tf32" # Deprecated
class Format(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
ONNX = "onnx"
TRT = "trt"
TS_SCRIPT = "ts-script"
TS_TRACE = "ts-trace"
PYT = "pyt"
FASTERTRANSFORMER = "fastertransformer"
class Model(NamedTuple):
handle: object
# TODO: precision should be removed
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
@abc.abstractmethod
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
pass
@property
@abc.abstractmethod
def metrics(self) -> Dict[str, Any]:
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
class MeasurementMode(Enum):
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
class PerformanceTool(Enum):
"""
Available performance evaluation tools
"""
MODEL_ANALYZER = "model_analyzer"
PERF_ANALYZER = "perf_analyzer"
class BatchingMode(Enum):
"""
Available batching modes
"""
STATIC = "static"
DYNAMIC = "dynamic"
class EvaluationMode(Enum):
"""
Available evaluation modes
"""
OFFLINE = "offline"
ONLINE = "online"
class OfflineMode(Enum):
SYSTEM = "system"
CUDA = "cuda"
|
PyTorch/Segmentation/nnUNet/triton | triton | run_online_performance_test_on_triton | #!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ],
where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ]
`--shape IMAGE:3,224,224`.
"""
import argparse
import csv
import os
import sys
from pathlib import Path
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def update_performance_data(results: List, performance_file: str):
with open(performance_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row["avg latency"] = calculate_average_latency(row)
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
def online_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
triton_gpu_engine_count: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Dynamic batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
print(f"Running performance tests for dynamic batching")
performance_file = f"triton_performance_dynamic_partial.csv"
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
max_concurrency = min(256, max_total_requests)
batch_size = max(1, max_total_requests // 256)
step = max(1, max_concurrency // 32)
min_concurrency = step
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_file} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
results = list()
update_performance_data(results=results, performance_file=performance_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
os.remove(performance_file)
print("Performance results for dynamic batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument(
"--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server"
)
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true",
default=False)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
online_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
if __name__ == "__main__":
main()
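# Example invocation (the script name, model name, and shapes below are
# hypothetical placeholders, not taken from a real deployment):
#   python run_online_performance_test.py \
#       --model-name FastPitch_PyT --batch-sizes 1,2,4,8 \
#       --input-shape INPUT__0:200 --server-url localhost \
#       --result-path results/triton_performance_online.csv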
|
PyTorch/Forecasting/TFT/triton/scripts | scripts | setup_environment | #!/usr/bin/env bash
# Copyright (c) 2021-2022 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
WORKDIR="${WORKDIR:=$(pwd)}"
export DATASETS_DIR=${WORKDIR}/datasets
export WORKSPACE_DIR=${WORKDIR}/runner_workspace
export CHECKPOINTS_DIR=${WORKSPACE_DIR}/checkpoints
export MODEL_REPOSITORY_PATH=${WORKSPACE_DIR}/model_store
export SHARED_DIR=${WORKSPACE_DIR}/shared_dir
echo "Preparing directories"
mkdir -p ${WORKSPACE_DIR}
mkdir -p ${DATASETS_DIR}
mkdir -p ${CHECKPOINTS_DIR}
mkdir -p ${MODEL_REPOSITORY_PATH}
mkdir -p ${SHARED_DIR}
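# Resulting layout under ${WORKDIR}:
#   datasets/                  <- DATASETS_DIR
#   runner_workspace/          <- WORKSPACE_DIR
#     checkpoints/             <- CHECKPOINTS_DIR
#     model_store/             <- MODEL_REPOSITORY_PATH
#     shared_dir/              <- SHARED_DIR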
echo "Setting up environment"
export MODEL_NAME=TFT
export ENSEMBLE_MODEL_NAME=
export TRITON_LOAD_MODEL_METHOD=explicit
export TRITON_INSTANCES=1 |
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/waveglow | waveglow | waveGlowStreamingInstance | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "waveGlowStreamingInstance.h"
#include "cudaUtils.h"
#include "trtUtils.h"
#include "NvInfer.h"
#include <stdexcept>
#include <string>
using namespace nvinfer1;
namespace tts
{
/******************************************************************************
* CONSTANTS ******************************************************************
*****************************************************************************/
namespace
{
constexpr const char* const MEL_INPUT_NAME = "spect";
constexpr const char* const Z_INPUT_NAME = "z";
constexpr const char* const OUTPUT_NAME = "audio";
} // namespace
/******************************************************************************
* HELPER FUNCTIONS ***********************************************************
*****************************************************************************/
namespace
{
void setBatchDimensions(IExecutionContext* const context, const int batchSize)
{
const ICudaEngine& engine = context->getEngine();
Dims melDims = engine.getBindingDimensions(0);
melDims.d[0] = batchSize;
context->setBindingDimensions(0, melDims);
}
} // namespace
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
WaveGlowStreamingInstance::WaveGlowStreamingInstance(
TRTPtr<ICudaEngine>&& eng) :
TimedObject("WaveGlowStreamingInstance::infer()"),
EngineDriver(std::move(eng)),
mChunkSize(TRTUtils::getBindingDimension(getEngine(), MEL_INPUT_NAME, 2)),
mSamplesPerFrame(256),
mChunkSampleSize(
TRTUtils::getNonBatchBindingSize(getEngine(), OUTPUT_NAME)),
mTruncatedChunkSampleSize(mSamplesPerFrame * mChunkSize),
mInputChannels(
TRTUtils::getBindingDimension(getEngine(), MEL_INPUT_NAME, 3)),
mZChannels(TRTUtils::getBindingDimension(getEngine(), Z_INPUT_NAME, 1)),
mBatchSize(1),
mBinding(),
mContext(getEngine().createExecutionContext()),
mRand(mChunkSampleSize, 0),
mZ(TRTUtils::getMaxBindingSize(getEngine(), Z_INPUT_NAME))
{
    // z has shape {batch, channels, length}; compare its per-channel length
    // (dimension 2) against the audio chunk size.
    const int zChunkSize = TRTUtils::getBindingDimension(getEngine(), Z_INPUT_NAME, 2);
if (zChunkSize * mZChannels > mChunkSampleSize)
{
throw std::runtime_error("Expected z to be of dimension at most: " + std::to_string(mZChannels) + "x"
+ std::to_string(mChunkSampleSize / mZChannels) + " but engine has " + std::to_string(mZChannels) + "x"
+ std::to_string(zChunkSize));
}
// generate z vector
mRand.setSeed(0, 0);
// set batch size to 1 by default
setBatchDimensions(mContext.get(), mBatchSize);
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void WaveGlowStreamingInstance::startInference(const int batchSize, cudaStream_t stream)
{
bool newBatchSize = mBatchSize != batchSize;
mBatchSize = batchSize;
mRand.generate(mZ.data(), mZ.size(), stream);
if (newBatchSize) {
// only set batch dimensions if they have changed
setBatchDimensions(mContext.get(), mBatchSize);
}
const ICudaEngine& engine = mContext->getEngine();
mBinding.setBinding(engine, Z_INPUT_NAME, mZ.data());
}
void WaveGlowStreamingInstance::inferNext(cudaStream_t stream, const float* const melsDevice, const int* const numMels,
float* outputDevice, int* numSamplesOut)
{
startTiming();
const ICudaEngine& engine = mContext->getEngine();
for (int batchIdx = 0; batchIdx < mBatchSize; ++batchIdx)
{
if (numMels[batchIdx] > mChunkSize)
{
throw std::runtime_error("Cannot work on chunk of " + std::to_string(numMels[batchIdx]) + ", maximum is "
+ std::to_string(mChunkSize));
}
}
// queue up work on the GPU
mBinding.setBinding(engine, MEL_INPUT_NAME, melsDevice);
mBinding.setBinding(engine, OUTPUT_NAME, outputDevice);
if (!mContext->enqueueV2(mBinding.getBindings(), stream, nullptr))
{
throw std::runtime_error("Failed to enqueue WaveGlow.");
}
// then do CPU work as needed
for (int batchIdx = 0; batchIdx < mBatchSize; ++batchIdx)
{
numSamplesOut[batchIdx] = numMels[batchIdx] * mSamplesPerFrame;
}
CudaUtils::sync(stream);
stopTiming();
}
int WaveGlowStreamingInstance::getNumberOfSamplesPerFrame() const
{
return mSamplesPerFrame;
}
int WaveGlowStreamingInstance::getMelSpacing() const
{
return mChunkSize;
}
int WaveGlowStreamingInstance::getNumMelChannels() const
{
return mInputChannels;
}
int WaveGlowStreamingInstance::getMaxOutputLength() const
{
return mChunkSize * mSamplesPerFrame;
}
int WaveGlowStreamingInstance::getOutputSpacing() const
{
    // reserve 768 extra samples (three 256-sample frames) of spacing beyond the
    // maximum chunk output
    return getMaxOutputLength() + 768;
}
int WaveGlowStreamingInstance::getRequiredOutputBufferSize(const int batchSize) const
{
return getOutputSpacing() * batchSize;
}
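// Illustrative call sequence (a sketch only; engine loading, stream creation,
// and device buffer allocation are assumed to happen elsewhere):
//
//   WaveGlowStreamingInstance waveglow(std::move(engine));
//   waveglow.startInference(batchSize, stream);
//   for (/* each chunk of mel spectrograms */) {
//     waveglow.inferNext(stream, melsDevice, numMelsPerItem,
//                        audioOutDevice, numSamplesOut);
//   }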
} // namespace tts
|
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trtis | trtis | CustomInputReader | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "CustomInputReader.hpp"
#include <algorithm>
#include <cstring>
#include <stdexcept>
#include <string>
using namespace tts;
/******************************************************************************
* CONSTRUCTORS / DESTRUCTOR **************************************************
*****************************************************************************/
CustomInputReader::CustomInputReader(const CharacterMapping& charMapping) :
TimedObject("CustomInputReader::read()"),
m_charMapping(charMapping)
{
addChild(&m_charMapping);
}
/******************************************************************************
* PUBLIC METHODS *************************************************************
*****************************************************************************/
void CustomInputReader::read(
void* const inputContext,
CustomGetNextInputFn_t inputFn,
const size_t maxLength,
const int batchSize,
int32_t* const inputHost,
int32_t* const inputLengthsHost,
int32_t* const inputSpacing)
{
startTiming();
// read input
std::vector<char> inputBuffer;
inputBuffer.reserve(maxLength * batchSize);
{
uint64_t sizeBytes;
const void* nextPtr;
size_t inputPos = 0;
while (true) {
sizeBytes = (maxLength * sizeof(*inputBuffer.data())) - inputPos;
const bool success = inputFn(inputContext, "INPUT", &nextPtr, &sizeBytes);
if (!success) {
throw std::runtime_error("CustomGetNextInputFn_t returned false while "
"reading input tensor.");
}
if (nextPtr == nullptr) {
// input is finished
break;
}
const size_t newSize = inputPos + sizeBytes;
if (newSize > maxLength * sizeof(*inputBuffer.data())) {
throw std::runtime_error(
"Input tensor is larger than expected: "
"next chunk of size "
+ std::to_string(sizeBytes) + " when already read "
+ std::to_string(inputPos) + " and tensor should be at most "
+ std::to_string(maxLength * sizeof(*inputBuffer.data())));
}
inputBuffer.resize((inputPos + sizeBytes) / sizeof(*inputBuffer.data()));
std::memcpy(
inputBuffer.data() + (inputPos / sizeof(*inputBuffer.data())),
nextPtr,
sizeBytes);
inputPos = newSize;
}
}
  // Currently the mapping only translates from multi-byte/character input to
  // single sequence items, not the reverse, so the mapped length can only
  // decrease.
  // First pass: determine the maximum input length across the batch.
int pos = 0;
int32_t maxInitLen = 0;
for (int i = 0; i < batchSize; ++i) {
const int length
= *reinterpret_cast<const int32_t*>(inputBuffer.data() + pos);
pos += sizeof(int32_t) + length;
maxInitLen = std::max(maxInitLen, length);
}
pos = 0;
for (int i = 0; i < batchSize; ++i) {
const int length
= *reinterpret_cast<const int32_t*>(inputBuffer.data() + pos);
pos += sizeof(int32_t);
size_t outputLen;
m_charMapping.map(
inputBuffer.data() + pos,
length,
inputHost + maxInitLen * i,
&outputLen);
inputLengthsHost[i] = static_cast<int32_t>(outputLen);
pos += length;
}
*inputSpacing = maxInitLen;
stopTiming();
}
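// Illustrative layout of the serialized "INPUT" tensor consumed by read()
// (values are hypothetical): for batchSize == 2 the buffer holds
//   [int32 len0][len0 bytes of text][int32 len1][len1 bytes of text]
// e.g. [5]["hello"][3]["hi!"]; each entry is mapped through m_charMapping into
// inputHost at stride maxInitLen, and inputLengthsHost receives the mapped lengths.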
void CustomInputReader::setCharacterMapping(const CharacterMapping& newMapping)
{
m_charMapping = newMapping;
}
|