Columns:
  relative_path: string (812 distinct values)
  section: string (339 distinct values)
  filename: string (length 2 to 61 characters)
  text: string (length 6 to 1.76M characters)
TensorFlow2/Recommendation/WideAndDeep/triton/runner/maintainer/docker
docker
maintainer
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Any, Dict, List, Optional, Union

import docker

if __name__ == "__main__" and __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name

from ...logger import LOGGER
from ..maintainer import Maintainer
from .container import DockerContainer
from .containers import TritonServerContainer


class DockerMaintainer(Maintainer):
    def triton_container(
        self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
    ) -> DockerContainer:
        """
        Return Triton Server container

        Args:
            command: Triton Server command to execute
            image: Container image
            devices: List of device ids which have to be available in the container
            volumes: Volumes mapping
            environment: Environment variables set in the container
            log_file: File path where server logs have to be saved

        Returns:
            DockerContainer object
        """
        return TritonServerContainer(
            name="triton-server",
            command=command,
            image=image,
            devices=devices,
            volumes=volumes,
            environment=environment,
            log_file=log_file,
        )

    def build_image(
        self,
        *,
        image_file_path: pathlib.Path,
        image_name: str,
        workdir_path: Optional[pathlib.Path] = None,
        build_args: Optional[Dict[str, Any]] = None,
    ) -> None:
        workdir_path = workdir_path or image_file_path.parent
        build_args = build_args or {}
        LOGGER.info(f"Building {image_name} docker image.")
        LOGGER.debug(f" Using workdir: {workdir_path}")
        LOGGER.debug(f" Dockerfile: {image_file_path}")
        LOGGER.debug(f" Build args: {build_args}")
        build_logs = list()
        try:
            docker_client = docker.from_env()
            _, build_logs = docker_client.images.build(
                path=workdir_path.resolve().as_posix(),
                dockerfile=image_file_path.resolve().as_posix(),
                tag=image_name,
                buildargs=build_args,
                network_mode="host",
                rm=True,
            )
        except docker.errors.BuildError as e:
            build_logs = e.build_log
            raise e
        finally:
            for chunk in build_logs:
                log = chunk.get("stream")
                if log:
                    LOGGER.debug(log.rstrip())
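A minimal usage sketch, not part of the original file: the Dockerfile path, image tag, build args, device ids, volume mapping and log path below are hypothetical placeholders, while the keyword arguments mirror the build_image and triton_container signatures above.

import pathlib

maintainer = DockerMaintainer()

# Build the serving image from a Dockerfile (hypothetical path, tag and build args).
maintainer.build_image(
    image_file_path=pathlib.Path("triton/Dockerfile"),
    image_name="wide-and-deep-triton:latest",
    build_args={"FROM_IMAGE": "nvcr.io/nvidia/tritonserver:22.02-py3"},
)

# Create the Triton Server container object; all values are placeholders.
container = maintainer.triton_container(
    command="tritonserver --model-repository=/models",
    image="wide-and-deep-triton:latest",
    devices=["0"],
    volumes={"/tmp/model_repository": {"bind": "/models", "mode": "ro"}},
    environment={"NVIDIA_VISIBLE_DEVICES": "0"},
    log_file="triton-server.log",
)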
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection
object_detection
faster_rcnn_box_coder
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Faster RCNN box coder. Faster RCNN box coder follows the coding schema described below: ty = (y - ya) / ha tx = (x - xa) / wa th = log(h / ha) tw = log(w / wa) where x, y, w, h denote the box's center coordinates, width and height respectively. Similarly, xa, ya, wa, ha denote the anchor's center coordinates, width and height. tx, ty, tw and th denote the anchor-encoded center, width and height respectively. See http://arxiv.org/abs/1506.01497 for details. """ import tensorflow as tf from mrcnn_tf2.object_detection import box_coder, box_list EPSILON = 1e-8 class FasterRcnnBoxCoder(box_coder.BoxCoder): """Faster RCNN box coder.""" def __init__(self, scale_factors=None): """Constructor for FasterRcnnBoxCoder. Args: scale_factors: List of 4 positive scalars to scale ty, tx, th and tw. If set to None, does not perform scaling. For Faster RCNN, the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0]. """ if scale_factors is not None: assert len(scale_factors) == 4 assert all([scalar > 0 for scalar in scale_factors]) self._scale_factors = scale_factors @property def code_size(self): return 4 def _encode(self, boxes, anchors): """Encode a box collection with respect to anchor collection. Args: boxes: BoxList holding N boxes to be encoded. anchors: BoxList of anchors. Returns: a tensor representing N anchor-encoded boxes of the format [ty, tx, th, tw]. """ # Convert anchors to the center coordinate representation. ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes() # Avoid NaN in division and log below. ha += EPSILON wa += EPSILON h += EPSILON w += EPSILON tx = (xcenter - xcenter_a) / wa ty = (ycenter - ycenter_a) / ha tw = tf.math.log(w / wa) th = tf.math.log(h / ha) # Scales location targets as used in paper for joint training. if self._scale_factors: ty *= self._scale_factors[0] tx *= self._scale_factors[1] th *= self._scale_factors[2] tw *= self._scale_factors[3] return tf.transpose(a=tf.stack([ty, tx, th, tw])) def _decode(self, rel_codes, anchors): """Decode relative codes to boxes. Args: rel_codes: a tensor representing N anchor-encoded boxes. anchors: BoxList of anchors. Returns: boxes: BoxList holding N bounding boxes. """ ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes() ty, tx, th, tw = tf.unstack(tf.transpose(a=rel_codes)) if self._scale_factors: ty /= self._scale_factors[0] tx /= self._scale_factors[1] th /= self._scale_factors[2] tw /= self._scale_factors[3] w = tf.exp(tw) * wa h = tf.exp(th) * ha ycenter = ty * ha + ycenter_a xcenter = tx * wa + xcenter_a ymin = ycenter - h / 2. xmin = xcenter - w / 2. ymax = ycenter + h / 2. xmax = xcenter + w / 2. return box_list.BoxList(tf.transpose(a=tf.stack([ymin, xmin, ymax, xmax])))
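A short sketch of the coder in use, not part of the original file; it assumes the box_coder.BoxCoder base class exposes public encode()/decode() wrappers around the private _encode()/_decode(), as in the upstream TensorFlow Object Detection API.

import tensorflow as tf
from mrcnn_tf2.object_detection import box_list

coder = FasterRcnnBoxCoder(scale_factors=[10.0, 10.0, 5.0, 5.0])

# Boxes and anchors are [ymin, xmin, ymax, xmax] in normalized coordinates.
boxes = box_list.BoxList(tf.constant([[0.1, 0.1, 0.5, 0.6]], dtype=tf.float32))
anchors = box_list.BoxList(tf.constant([[0.0, 0.0, 0.6, 0.8]], dtype=tf.float32))

rel_codes = coder.encode(boxes, anchors)    # tensor of [ty, tx, th, tw], shape [1, 4]
decoded = coder.decode(rel_codes, anchors)  # BoxList that reproduces `boxes`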
CUDA-Optimized/FastSpeech/tacotron2
tacotron2
model
# BSD 3-Clause License # Copyright (c) 2018-2020, NVIDIA Corporation # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """https://github.com/NVIDIA/tacotron2""" from math import sqrt import torch from torch.autograd import Variable from torch import nn from torch.nn import functional as F from tacotron2.layers import ConvNorm, LinearNorm from tacotron2.utils import to_gpu, get_mask_from_lengths class LocationLayer(nn.Module): def __init__(self, attention_n_filters, attention_kernel_size, attention_dim): super(LocationLayer, self).__init__() padding = int((attention_kernel_size - 1) / 2) self.location_conv = ConvNorm(2, attention_n_filters, kernel_size=attention_kernel_size, padding=padding, bias=False, stride=1, dilation=1) self.location_dense = LinearNorm(attention_n_filters, attention_dim, bias=False, w_init_gain='tanh') def forward(self, attention_weights_cat): processed_attention = self.location_conv(attention_weights_cat) processed_attention = processed_attention.transpose(1, 2) processed_attention = self.location_dense(processed_attention) return processed_attention class Attention(nn.Module): def __init__(self, attention_rnn_dim, embedding_dim, attention_dim, attention_location_n_filters, attention_location_kernel_size): super(Attention, self).__init__() self.query_layer = LinearNorm(attention_rnn_dim, attention_dim, bias=False, w_init_gain='tanh') self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False, w_init_gain='tanh') self.v = LinearNorm(attention_dim, 1, bias=False) self.location_layer = LocationLayer(attention_location_n_filters, attention_location_kernel_size, attention_dim) self.score_mask_value = -1e9 def get_alignment_energies(self, query, processed_memory, attention_weights_cat): """ PARAMS ------ query: decoder output (batch, n_mel_channels * n_frames_per_step) processed_memory: processed encoder outputs (B, T_in, attention_dim) attention_weights_cat: cumulative and prev. 
att weights (B, 2, max_time) RETURNS ------- alignment (batch, max_time) """ processed_query = self.query_layer(query.unsqueeze(1)) processed_attention_weights = self.location_layer( attention_weights_cat) energies = self.v(torch.tanh( processed_query + processed_attention_weights + processed_memory)) energies = energies.squeeze(-1) return energies def forward(self, attention_hidden_state, memory, processed_memory, attention_weights_cat, mask): """ PARAMS ------ attention_hidden_state: attention rnn last output memory: encoder outputs processed_memory: processed encoder outputs attention_weights_cat: previous and cummulative attention weights mask: binary mask for padded data """ alignment = self.get_alignment_energies( attention_hidden_state, processed_memory, attention_weights_cat) if mask is not None: alignment.data.masked_fill_(mask, self.score_mask_value) attention_weights = F.softmax(alignment, dim=1) attention_context = torch.bmm(attention_weights.unsqueeze(1), memory) attention_context = attention_context.squeeze(1) return attention_context, attention_weights class Prenet(nn.Module): def __init__(self, in_dim, sizes): super(Prenet, self).__init__() in_sizes = [in_dim] + sizes[:-1] self.layers = nn.ModuleList( [LinearNorm(in_size, out_size, bias=False) for (in_size, out_size) in zip(in_sizes, sizes)]) def forward(self, x): for linear in self.layers: x = F.dropout(F.relu(linear(x)), p=0.5, training=True) return x class Postnet(nn.Module): """Postnet - Five 1-d convolution with 512 channels and kernel size 5 """ def __init__(self, hparams): super(Postnet, self).__init__() self.convolutions = nn.ModuleList() self.convolutions.append( nn.Sequential( ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='tanh'), nn.BatchNorm1d(hparams.postnet_embedding_dim)) ) for i in range(1, hparams.postnet_n_convolutions - 1): self.convolutions.append( nn.Sequential( ConvNorm(hparams.postnet_embedding_dim, hparams.postnet_embedding_dim, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int( (hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='tanh'), nn.BatchNorm1d(hparams.postnet_embedding_dim)) ) self.convolutions.append( nn.Sequential( ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels, kernel_size=hparams.postnet_kernel_size, stride=1, padding=int((hparams.postnet_kernel_size - 1) / 2), dilation=1, w_init_gain='linear'), nn.BatchNorm1d(hparams.n_mel_channels)) ) def forward(self, x): for i in range(len(self.convolutions) - 1): x = F.dropout(torch.tanh( self.convolutions[i](x)), 0.5, self.training) x = F.dropout(self.convolutions[-1](x), 0.5, self.training) return x class Encoder(nn.Module): """Encoder module: - Three 1-d convolution banks - Bidirectional LSTM """ def __init__(self, hparams): super(Encoder, self).__init__() convolutions = [] for _ in range(hparams.encoder_n_convolutions): conv_layer = nn.Sequential( ConvNorm(hparams.encoder_embedding_dim, hparams.encoder_embedding_dim, kernel_size=hparams.encoder_kernel_size, stride=1, padding=int((hparams.encoder_kernel_size - 1) / 2), dilation=1, w_init_gain='relu'), nn.BatchNorm1d(hparams.encoder_embedding_dim)) convolutions.append(conv_layer) self.convolutions = nn.ModuleList(convolutions) self.lstm = nn.LSTM(hparams.encoder_embedding_dim, int(hparams.encoder_embedding_dim / 2), 1, batch_first=True, bidirectional=True) def forward(self, x, input_lengths): for conv in 
self.convolutions: x = F.dropout(F.relu(conv(x)), 0.5, self.training) x = x.transpose(1, 2) # pytorch tensor are not reversible, hence the conversion input_lengths = input_lengths.cpu().numpy() x = nn.utils.rnn.pack_padded_sequence( x, input_lengths, batch_first=True) self.lstm.flatten_parameters() outputs, _ = self.lstm(x) outputs, _ = nn.utils.rnn.pad_packed_sequence( outputs, batch_first=True) return outputs def inference(self, x): for conv in self.convolutions: x = F.dropout(F.relu(conv(x)), 0.5, self.training) x = x.transpose(1, 2) self.lstm.flatten_parameters() outputs, _ = self.lstm(x) return outputs class Decoder(nn.Module): def __init__(self, hparams): super(Decoder, self).__init__() self.n_mel_channels = hparams.n_mel_channels self.n_frames_per_step = hparams.n_frames_per_step self.encoder_embedding_dim = hparams.encoder_embedding_dim self.attention_rnn_dim = hparams.attention_rnn_dim self.decoder_rnn_dim = hparams.decoder_rnn_dim self.prenet_dim = hparams.prenet_dim self.max_decoder_steps = hparams.max_decoder_steps self.gate_threshold = hparams.gate_threshold self.p_attention_dropout = hparams.p_attention_dropout self.p_decoder_dropout = hparams.p_decoder_dropout self.prenet = Prenet( hparams.n_mel_channels * hparams.n_frames_per_step, [hparams.prenet_dim, hparams.prenet_dim]) self.attention_rnn = nn.LSTMCell( hparams.prenet_dim + hparams.encoder_embedding_dim, hparams.attention_rnn_dim) self.attention_layer = Attention( hparams.attention_rnn_dim, hparams.encoder_embedding_dim, hparams.attention_dim, hparams.attention_location_n_filters, hparams.attention_location_kernel_size) self.decoder_rnn = nn.LSTMCell( hparams.attention_rnn_dim + hparams.encoder_embedding_dim, hparams.decoder_rnn_dim, 1) self.linear_projection = LinearNorm( hparams.decoder_rnn_dim + hparams.encoder_embedding_dim, hparams.n_mel_channels * hparams.n_frames_per_step) self.gate_layer = LinearNorm( hparams.decoder_rnn_dim + hparams.encoder_embedding_dim, 1, bias=True, w_init_gain='sigmoid') def get_go_frame(self, memory): """ Gets all zeros frames to use as first decoder input PARAMS ------ memory: decoder outputs RETURNS ------- decoder_input: all zeros frames """ B = memory.size(0) decoder_input = Variable(memory.data.new( B, self.n_mel_channels * self.n_frames_per_step).zero_()) return decoder_input def initialize_decoder_states(self, memory, mask): """ Initializes attention rnn states, decoder rnn states, attention weights, attention cumulative weights, attention context, stores memory and stores processed memory PARAMS ------ memory: Encoder outputs mask: Mask for padded data if training, expects None for inference """ B = memory.size(0) MAX_TIME = memory.size(1) self.attention_hidden = Variable(memory.data.new( B, self.attention_rnn_dim).zero_()) self.attention_cell = Variable(memory.data.new( B, self.attention_rnn_dim).zero_()) self.decoder_hidden = Variable(memory.data.new( B, self.decoder_rnn_dim).zero_()) self.decoder_cell = Variable(memory.data.new( B, self.decoder_rnn_dim).zero_()) self.attention_weights = Variable(memory.data.new( B, MAX_TIME).zero_()) self.attention_weights_cum = Variable(memory.data.new( B, MAX_TIME).zero_()) self.attention_context = Variable(memory.data.new( B, self.encoder_embedding_dim).zero_()) self.memory = memory self.processed_memory = self.attention_layer.memory_layer(memory) self.mask = mask def parse_decoder_inputs(self, decoder_inputs): """ Prepares decoder inputs, i.e. mel outputs PARAMS ------ decoder_inputs: inputs used for teacher-forced training, i.e. 
mel-specs RETURNS ------- inputs: processed decoder inputs """ # (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels) decoder_inputs = decoder_inputs.transpose(1, 2) decoder_inputs = decoder_inputs.view( decoder_inputs.size(0), int(decoder_inputs.size(1)/self.n_frames_per_step), -1) # (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels) decoder_inputs = decoder_inputs.transpose(0, 1) return decoder_inputs def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments): """ Prepares decoder outputs for output PARAMS ------ mel_outputs: gate_outputs: gate output energies alignments: RETURNS ------- mel_outputs: gate_outpust: gate output energies alignments: """ # (T_out, B) -> (B, T_out) alignments = torch.stack(alignments).transpose(0, 1) # (T_out, B) -> (B, T_out) gate_outputs = torch.stack(gate_outputs).transpose(0, 1) gate_outputs = gate_outputs.contiguous() # (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels) mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous() # decouple frames per step mel_outputs = mel_outputs.view( mel_outputs.size(0), -1, self.n_mel_channels) # (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out) mel_outputs = mel_outputs.transpose(1, 2) return mel_outputs, gate_outputs, alignments def decode(self, decoder_input): """ Decoder step using stored states, attention and memory PARAMS ------ decoder_input: previous mel output RETURNS ------- mel_output: gate_output: gate output energies attention_weights: """ cell_input = torch.cat((decoder_input, self.attention_context), -1) self.attention_hidden, self.attention_cell = self.attention_rnn( cell_input, (self.attention_hidden, self.attention_cell)) self.attention_hidden = F.dropout( self.attention_hidden, self.p_attention_dropout, self.training) attention_weights_cat = torch.cat( (self.attention_weights.unsqueeze(1), self.attention_weights_cum.unsqueeze(1)), dim=1) self.attention_context, self.attention_weights = self.attention_layer( self.attention_hidden, self.memory, self.processed_memory, attention_weights_cat, self.mask) self.attention_weights_cum += self.attention_weights decoder_input = torch.cat( (self.attention_hidden, self.attention_context), -1) self.decoder_hidden, self.decoder_cell = self.decoder_rnn( decoder_input, (self.decoder_hidden, self.decoder_cell)) self.decoder_hidden = F.dropout( self.decoder_hidden, self.p_decoder_dropout, self.training) decoder_hidden_attention_context = torch.cat( (self.decoder_hidden, self.attention_context), dim=1) decoder_output = self.linear_projection( decoder_hidden_attention_context) gate_prediction = self.gate_layer(decoder_hidden_attention_context) return decoder_output, gate_prediction, self.attention_weights def forward(self, memory, decoder_inputs, memory_lengths): """ Decoder forward pass for training PARAMS ------ memory: Encoder outputs decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs memory_lengths: Encoder output lengths for attention masking. 
RETURNS ------- mel_outputs: mel outputs from the decoder gate_outputs: gate outputs from the decoder alignments: sequence of attention weights from the decoder """ decoder_input = self.get_go_frame(memory).unsqueeze(0) decoder_inputs = self.parse_decoder_inputs(decoder_inputs) decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0) decoder_inputs = self.prenet(decoder_inputs) self.initialize_decoder_states( memory, mask=~get_mask_from_lengths(memory_lengths)) mel_outputs, gate_outputs, alignments = [], [], [] while len(mel_outputs) < decoder_inputs.size(0) - 1: decoder_input = decoder_inputs[len(mel_outputs)] mel_output, gate_output, attention_weights = self.decode( decoder_input) mel_outputs += [mel_output] gate_outputs += [gate_output.squeeze(1)] alignments += [attention_weights] mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs( mel_outputs, gate_outputs, alignments) return mel_outputs, gate_outputs, alignments def inference(self, memory): """ Decoder inference PARAMS ------ memory: Encoder outputs RETURNS ------- mel_outputs: mel outputs from the decoder gate_outputs: gate outputs from the decoder alignments: sequence of attention weights from the decoder """ decoder_input = self.get_go_frame(memory) self.initialize_decoder_states(memory, mask=None) mel_outputs, gate_outputs, alignments = [], [], [] while True: decoder_input = self.prenet(decoder_input) mel_output, gate_output, alignment = self.decode(decoder_input) mel_outputs += [mel_output.squeeze(1)] gate_outputs += [gate_output] alignments += [alignment] if torch.sigmoid(gate_output.data) > self.gate_threshold: break elif len(mel_outputs) == self.max_decoder_steps: # print("Warning! Reached max decoder steps") break decoder_input = mel_output mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs( mel_outputs, gate_outputs, alignments) return mel_outputs, gate_outputs, alignments class Tacotron2(nn.Module): def __init__(self, hparams): super(Tacotron2, self).__init__() self.mask_padding = hparams.mask_padding self.fp16_run = hparams.fp16_run self.n_mel_channels = hparams.n_mel_channels self.n_frames_per_step = hparams.n_frames_per_step self.embedding = nn.Embedding( hparams.n_symbols, hparams.symbols_embedding_dim) std = sqrt(2.0 / (hparams.n_symbols + hparams.symbols_embedding_dim)) val = sqrt(3.0) * std # uniform bounds for std self.embedding.weight.data.uniform_(-val, val) self.encoder = Encoder(hparams) self.decoder = Decoder(hparams) self.postnet = Postnet(hparams) def parse_batch(self, batch): text_padded, input_lengths, mel_padded, gate_padded, \ output_lengths = batch text_padded = to_gpu(text_padded).long() input_lengths = to_gpu(input_lengths).long() max_len = torch.max(input_lengths.data).item() mel_padded = to_gpu(mel_padded).float() gate_padded = to_gpu(gate_padded).float() output_lengths = to_gpu(output_lengths).long() return ( (text_padded, input_lengths, mel_padded, max_len, output_lengths), (mel_padded, gate_padded)) def parse_output(self, outputs, output_lengths=None): if self.mask_padding and output_lengths is not None: mask = ~get_mask_from_lengths(output_lengths) mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1)) mask = mask.permute(1, 0, 2) outputs[0].data.masked_fill_(mask, 0.0) outputs[1].data.masked_fill_(mask, 0.0) outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies return outputs def forward(self, inputs): text_inputs, text_lengths, mels, max_len, output_lengths = inputs text_lengths, output_lengths = text_lengths.data, 
output_lengths.data embedded_inputs = self.embedding(text_inputs).transpose(1, 2) encoder_outputs = self.encoder(embedded_inputs, text_lengths) mel_outputs, gate_outputs, alignments = self.decoder( encoder_outputs, mels, memory_lengths=text_lengths) mel_outputs_postnet = self.postnet(mel_outputs) mel_outputs_postnet = mel_outputs + mel_outputs_postnet return self.parse_output( [mel_outputs, mel_outputs_postnet, gate_outputs, alignments], output_lengths) def inference(self, inputs): embedded_inputs = self.embedding(inputs).transpose(1, 2) encoder_outputs = self.encoder.inference(embedded_inputs) mel_outputs, gate_outputs, alignments = self.decoder.inference( encoder_outputs) mel_outputs_postnet = self.postnet(mel_outputs) mel_outputs_postnet = mel_outputs + mel_outputs_postnet outputs = self.parse_output( [mel_outputs, mel_outputs_postnet, gate_outputs, alignments]) return outputs
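A rough inference sketch, not part of the file: `hparams` stands for the usual Tacotron 2 hyperparameter namespace consumed by the constructor, and `text_to_sequence` for the text front-end shipped with the reference Tacotron 2 repository; both are assumptions here rather than imports guaranteed by this file.

import torch
from tacotron2.text import text_to_sequence  # assumed front-end module

model = Tacotron2(hparams).cuda().eval()

# Encode a sentence as symbol ids and add a batch dimension.
sequence = torch.tensor(text_to_sequence("Hello world.", ["english_cleaners"]),
                        dtype=torch.long).unsqueeze(0).cuda()

with torch.no_grad():
    mel, mel_postnet, gates, alignments = model.inference(sequence)
# mel_postnet has shape (1, n_mel_channels, T_out) and is the spectrogram fed to a vocoder.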
PyTorch/SpeechSynthesis/HiFiGAN
HiFiGAN
export
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch import models def parse_args(parser): """ Parse commandline arguments. """ parser.add_argument('model_name', type=str, choices=['HiFi-GAN', 'FastPitch'], help='Name of the converted model') parser.add_argument('input_ckpt', type=str, help='Path to the input checkpoint') parser.add_argument('output_ckpt', default=None, help='Path to save the output checkpoint to') parser.add_argument('--cuda', action='store_true', help='Move model weights to GPU before export') parser.add_argument('--amp', action='store_true', help='Convert model to FP16 prior to saving') parser.add_argument('--load-from', type=str, default='pyt', choices=['pyt', 'ts'], help='Source checkpoint format') parser.add_argument('--convert-to', type=str, default='ts', choices=['ts', 'ttrt'], help='Output checkpoint format') return parser def main(): """ Exports PyT or TorchScript checkpoint to TorchScript or Torch-TensorRT. """ parser = argparse.ArgumentParser(description='PyTorch model export', allow_abbrev=False) parser = parse_args(parser) args, unk_args = parser.parse_known_args() device = torch.device('cuda' if args.cuda else 'cpu') assert args.load_from != args.convert_to, \ 'Load and convert formats must be different' print(f'Converting {args.model_name} from "{args.load_from}"' f' to "{args.convert_to}" ({device}).') if args.load_from == 'ts': ts_model, _ = models.load_and_setup_ts_model(args.model_name, args.input_ckpt, args.amp, device) else: assert args.load_from == 'pyt' pyt_model, _ = models.load_pyt_model_for_infer( args.model_name, parser, args.input_ckpt, args.amp, device, unk_args=unk_args, jitable=True) ts_model = torch.jit.script(pyt_model) if args.convert_to == 'ts': torch.jit.save(ts_model, args.output_ckpt) else: assert args.convert_to == 'ttrt' trt_model = models.convert_ts_to_trt('HiFi-GAN', ts_model, parser, args.amp, unk_args) torch.jit.save(trt_model, args.output_ckpt) print(f'{args.model_name}: checkpoint saved to {args.output_ckpt}.') if unk_args: print(f'Warning: encountered unknown program options: {unk_args}') if __name__ == '__main__': main()
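A hedged invocation sketch, not part of the file: the script filename and checkpoint paths are placeholders, while the positional arguments and flags match the parser defined above.

import subprocess

subprocess.run(
    [
        "python", "export.py", "HiFi-GAN",    # model_name
        "checkpoints/hifigan_gen.pt",         # input_ckpt (hypothetical)
        "checkpoints/hifigan_gen.pt.ts",      # output_ckpt (hypothetical)
        "--cuda", "--amp",
        "--load-from", "pyt",
        "--convert-to", "ts",
    ],
    check=True,
)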
PyTorch/LanguageModeling/BERT/triton/dist6l/runner
runner
start_NVIDIA-T4
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash

# Install Docker
. /etc/os-release && \
curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \
echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey| apt-key add - && \
curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \
apt-get update && \
apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2

# Install packages
pip install -r triton/runner/requirements.txt

# Evaluate Runner
python3 -m "triton.dist6l.runner.__main__" \
    --config-path "triton/dist6l/runner/config_NVIDIA-T4.yaml" \
    --device 0
TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library
library
tf
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from pathlib import Path from typing import Dict, Iterable, Optional, Tuple, Union import numpy as np # pytype: disable=import-error import tensorflow as tf from tensorflow.python.eager import wrap_function from tf2onnx.shape_inference import infer_shape from tf2onnx.tf_loader import ( freeze_session, from_function, inputs_without_resource, is_function, remove_redundant_inputs, tf_optimize, ) # pytype: enable=import-error from ..args import filter_fn_args from ..core import ( GET_MODEL_FN_NAME, GET_SERVING_INPUT_RECEIVER_FN, BaseConverter, BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec, load_from_file, ) from ..extensions import converters, loaders, runners, savers from .utils import infer_precision LOGGER = logging.getLogger(__name__) def is_tf2(): return tf.__version__.startswith("2.") def create_session_config(*, allow_growth=False, use_xla=False, gpu_memory_fraction=1.0): gpu_options = tf.compat.v1.GPUOptions( per_process_gpu_memory_fraction=gpu_memory_fraction, allow_growth=allow_growth ) config = tf.compat.v1.ConfigProto(gpu_options=gpu_options) if use_xla: config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1 LOGGER.debug( f"Using gpu memory fraction: allow_growth={allow_growth} " f"gpu_memory_fraction={gpu_memory_fraction} " f"use_xla={use_xla}" ) return config class TFTRTConverter(BaseConverter): def __init__( self, *, is_dynamic_op: bool = False, minimum_segment_size: int = 3, max_batch_size: int = 1, max_workspace_size: int = (4 << 30) - 1000, # ~3.999GB maximum_cached_engines: int = 1000, precision: str, ): self._is_dynamic_op = is_dynamic_op self._minimum_segment_size = minimum_segment_size self._max_batch_size = max_batch_size self._max_workspace_size = max_workspace_size self._maximum_cached_engines = maximum_cached_engines self._precision = Precision(precision) def convert(self, model: Model, dataloader_fn) -> Model: # https://docs.nvidia.com/deeplearning/frameworks/tf-trt-user-guide/index.html # converting graph_def is not supported in TF2 from tensorflow.python.compiler.tensorrt import trt_convert # pytype: disable=import-error assert isinstance(model.handle, tf.compat.v1.GraphDef) session_config = create_session_config(allow_growth=True) output_node_names = [spec.name.split(":")[0] for spec in model.outputs.values()] converter = trt_convert.TrtGraphConverter( input_graph_def=model.handle, session_config=session_config, nodes_blacklist=output_node_names, is_dynamic_op=self._is_dynamic_op, precision_mode=self._precision.value, max_workspace_size_bytes=self._max_workspace_size, maximum_cached_engines=self._maximum_cached_engines, max_batch_size=self._max_batch_size, minimum_segment_size=self._minimum_segment_size, ) graph_def = converter.convert() return model._replace(handle=graph_def) @staticmethod def required_source_model_precision(requested_model_precision: Precision) -> Precision: # 
TensorRT requires source models to be in FP32 precision return Precision.FP32 def _from_saved_model_v1(sess, model_path, tag, signatures): """ Load tensorflow graph from saved_model. NOTICE: Modified version from tf2onnx project """ wrn_no_tag = "'--tag' not specified for saved_model. Using --tag serve" wrn_empty_tag = "'--tag' value is empty string. Using tag =[[]]" if tag is None: tag = [tf.saved_model.SERVING] LOGGER.warning(wrn_no_tag) if tag == "": tag = [[]] LOGGER.warning(wrn_empty_tag) if not isinstance(tag, list): tag = [tag] imported = tf.compat.v1.saved_model.loader.load(sess, tag, model_path) for k in imported.signature_def.keys(): if k.startswith("_"): # consider signatures starting with '_' private continue signatures.append(k) try: from tensorflow.contrib.saved_model.python.saved_model import ( # pytype: disable=import-error signature_def_utils, ) # pylint: disable=unnecessary-lambda get_signature_def = lambda meta_graph_def, k: signature_def_utils.get_signature_def_by_key(meta_graph_def, k) except ImportError: # TF1.12 changed the api get_signature_def = lambda meta_graph_def, k: meta_graph_def.signature_def[k] inputs = {} outputs = {} for k in signatures: inputs_tensor_info = get_signature_def(imported, k).inputs for name, input_tensor in inputs_tensor_info.items(): inputs[name] = input_tensor.name outputs_tensor_info = get_signature_def(imported, k).outputs for name, output_tensor in outputs_tensor_info.items(): outputs[name] = output_tensor.name frozen_graph = freeze_session(sess, input_names=list(inputs.values()), output_names=list(outputs.values())) return frozen_graph, inputs, outputs def _infer_model_precision( tf_graph: tf.compat.v1.GraphDef, inputs_dict: Dict[str, TensorSpec], outputs_dict: Dict[str, TensorSpec] ) -> Optional[Precision]: import networkx as nx def _get_dtype(node_def): node_type = node_def.attr.get("T", None) or node_def.attr.get("dtype", None) if node_type: if node_type.list.type: assert len(set(node_type.list.type)) == 1 node_type = tf.dtypes.DType(node_type.list.type[0]) else: node_type = tf.dtypes.DType(node_type.type) return np.dtype(node_type.as_numpy_dtype()) if node_type and node_type.is_numpy_compatible else node_type # build directed graph nx_graph = nx.DiGraph() for node_def in tf_graph.node: nx_graph.add_node( node_def.name, op=node_def.op, **{key: value for key, value in node_def.attr.items() if key not in ["value", "dtype"]}, dtype=_get_dtype(node_def), ) for input in node_def.input: nx_graph.add_edge(input, node_def.name) input_names = [spec.name.split(":")[0] for spec in inputs_dict.values()] output_names = [spec.name.split(":")[0] for spec in outputs_dict.values()] most_common_dtype = infer_precision(nx_graph, input_names, output_names, _get_dtype) if most_common_dtype is not None: precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype] else: precision = None return precision class TFEstimatorLoader(BaseLoader): required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME def __init__(self, **kwargs): self._model_args = kwargs def load(self, model_path: Union[str, Path], **_) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME) get_serving_input_receiver_fn = load_from_file(model_path, "model", GET_SERVING_INPUT_RECEIVER_FN) if get_model is None: raise RuntimeError(f"Could not find {GET_MODEL_FN_NAME} in {model_path}") if get_serving_input_receiver_fn is None: raise 
RuntimeError(f"Could not find {GET_SERVING_INPUT_RECEIVER_FN} in {model_path}") model_args = filter_fn_args(self._model_args, fn=get_model) serving_input_receiver_args = filter_fn_args(self._model_args, fn=get_serving_input_receiver_fn) session_config = create_session_config(allow_growth=True) tf.compat.v1.reset_default_graph() with tf.compat.v1.Session(config=session_config) as sess: estimator = get_model(**model_args) serving_input_receiver_fn = get_serving_input_receiver_fn(**serving_input_receiver_args) input_receiver = serving_input_receiver_fn() estimator_spec = estimator.model_fn( features=input_receiver.features, labels=None, mode=tf.estimator.ModeKeys.PREDICT, config=estimator.config, ) input_tensors_dict = input_receiver.receiver_tensors output_tensors_dict = estimator_spec.predictions inputs_dict = {k: tensor2tensor_spec(tensor) for k, tensor in input_tensors_dict.items()} outputs_dict = {k: tensor2tensor_spec(tensor) for k, tensor in output_tensors_dict.items()} input_tensor_names = [t.name for t in inputs_dict.values()] output_tensor_names = [t.name for t in outputs_dict.values()] graph_saver = estimator_spec.scaffold.saver or tf.compat.v1.train.Saver(sharded=True) graph_saver.restore(sess, estimator.latest_checkpoint()) input_tensor_names = inputs_without_resource(sess, input_tensor_names) frozen_graph = freeze_session(sess, input_names=input_tensor_names, output_names=output_tensor_names) input_tensor_names = remove_redundant_inputs(frozen_graph, input_tensor_names) tf.compat.v1.reset_default_graph() with tf.compat.v1.Session(config=estimator.config.session_config): frozen_graph = tf_optimize(input_tensor_names, output_tensor_names, frozen_graph) tf.compat.v1.reset_default_graph() precision = _infer_model_precision(frozen_graph, inputs_dict, outputs_dict) return Model(frozen_graph, precision, inputs_dict, outputs_dict) class TFKerasLoader(BaseLoader): """ Loads keras model from source code The large-model flag helps loading model which exceeds maximum protobuf size of 2GB. By default it is disabled. The tf-allow-growth flag control limiting GPU memory growth feature (https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). By default it is disabled. 
""" required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME def __init__(self, large_model: bool = False, tf_allow_growth: bool = False, **kwargs): self._large_model = large_model self._allow_growth = tf_allow_growth self._model_args = kwargs def load(self, model_path: Union[str, Path], **_) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME) if get_model is None: raise RuntimeError(f"Could not find {GET_MODEL_FN_NAME} in {model_path}") model_args = filter_fn_args(self._model_args, fn=get_model) if self._allow_growth: physical_devices = tf.config.experimental.list_physical_devices("GPU") for device in physical_devices: tf.config.experimental.set_memory_growth(device, True) tf.keras.backend.clear_session() tf.keras.backend.set_learning_phase(False) eager_model, call_fn = get_model(**model_args) inputs_dict: Dict[str, TensorSpec] = { input_name: TensorSpec(t.name, t.dtype.name, tuple(t.shape.as_list())) for input_name, t in zip(eager_model.input_names, eager_model.inputs) } concrete_func = call_fn.get_concrete_function( *[tf.TensorSpec(shape=spec.shape, dtype=spec.dtype, name=name) for name, spec in inputs_dict.items()] ) input_tensors_names = [tensor.name for tensor in concrete_func.inputs if tensor.dtype != tf.dtypes.resource] output_tensors_names = [tensor.name for tensor in concrete_func.outputs] graph_def = from_function( concrete_func, input_tensors_names, output_tensors_names, large_model=self._large_model ) # tensor names changes after wrapping with call_fn, thus need to use those from concrete_func outputs_dict: Dict[str, TensorSpec] = { output_name: TensorSpec(output_tensor_name, t.dtype.name, tuple(t.shape.as_list())) for output_name, output_tensor_name, t in zip( eager_model.output_names, output_tensors_names, eager_model.outputs ) } precision = _infer_model_precision(graph_def, inputs_dict, outputs_dict) tf.keras.backend.clear_session() tf.keras.backend.set_learning_phase(False) def _add_suffix_as_quickfix_for_tf24_func_refactor(spec): if not spec.name.endswith(":0"): spec = spec._replace(name=spec.name + ":0") return spec inputs_dict = {name: _add_suffix_as_quickfix_for_tf24_func_refactor(spec) for name, spec in inputs_dict.items()} return Model(graph_def, precision, inputs_dict, outputs_dict) class TFSavedModelLoader(BaseLoader): def load(self, model_path: Union[str, Path], **kwargs) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() tf.compat.v1.reset_default_graph() if is_tf2(): from tf2onnx.tf_loader import _from_saved_model_v2 # pytype: disable=import-error graph_def, input_names, output_names, concrete_func, imported, initialized_tables = _from_saved_model_v2( model_path=model_path, input_names=None, output_names=None, tag=None, signature_def=[], concrete_function_index=None, large_model=False, ) # inspired by https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/saved_model_cli.py#L205 if concrete_func.structured_input_signature: input_args, input_kwargs = concrete_func.structured_input_signature input_names = list(input_kwargs) assert ( not input_args ), f"Not supported args in concrete function signature args={input_args}, kwargs={input_kwargs}" elif concrete_func._arg_keywords: # pylint: disable=protected-access # For pure ConcreteFunctions we might have nothing better than _arg_keywords. 
assert concrete_func._num_positional_args in [0, 1] input_names = concrete_func._arg_keywords input_tensors = [tensor for tensor in concrete_func.inputs if tensor.dtype != tf.dtypes.resource] inputs = {name: tensor.name for name, tensor in zip(input_names, input_tensors)} # they are already flattened output_tensors = [tensor for tensor in concrete_func.outputs if tensor.dtype != tf.dtypes.resource] output_names = sorted(concrete_func.structured_outputs) # because outputs are in flatten form outputs = {name: tensor.name for name, tensor in zip(output_names, output_tensors)} else: session_config = create_session_config(allow_growth=True) with tf.compat.v1.Session(config=session_config) as sess: graph_def, inputs, outputs = _from_saved_model_v1(sess, model_path, tag=None, signatures=[]) inputs, outputs = handle_tensor_specs(graph_def, inputs, outputs) precision = _infer_model_precision(graph_def, inputs, outputs) return Model(graph_def, precision, inputs, outputs) class TFRunner(BaseRunner): def __init__(self): pass def init_inference(self, model: Model): if is_tf2(): return TF2RunnerSession(model=model) else: return TF1RunnerSession(model=model) class TF1RunnerSession(BaseRunnerSession): def __init__(self, model: Model): super().__init__(model) assert isinstance(model.handle, tf.compat.v1.GraphDef) self._inputs = None self._outputs = None self._session = None self._old_env_values = {} def __enter__(self): self._old_env_values = self._set_env_variables() tf.compat.v1.reset_default_graph() session_config = create_session_config(allow_growth=True) self._session = tf.compat.v1.Session(config=session_config) self._session.__enter__() tf.import_graph_def(self._model.handle, name="") self._inputs = { name: self._session.graph.get_tensor_by_name(spec.name) for name, spec in self._model.inputs.items() } self._outputs = { name: self._session.graph.get_tensor_by_name(spec.name) for name, spec in self._model.outputs.items() } return self def __exit__(self, exc_type, exc_value, traceback): self._session.__exit__(exc_type, exc_value, traceback) tf.compat.v1.reset_default_graph() self._inputs = None self._outputs = None self._session = None self._recover_env_variables(self._old_env_values) def __call__(self, x: Dict[str, object]): feed_dict = {placeholder: x[name] for name, placeholder in self._inputs.items()} return self._session.run(self._outputs, feed_dict=feed_dict) class TF2RunnerSession(BaseRunnerSession): def __init__(self, model: Model): super().__init__(model) assert isinstance(model.handle, tf.compat.v1.GraphDef) self._concrete_func = None def __enter__(self): tf.compat.v1.reset_default_graph() input_tensor_names = [spec.name for spec in self._model.inputs.values()] output_tensor_names = [spec.name for spec in self._model.outputs.values()] self._concrete_func = wrap_function.function_from_graph_def( self._model.handle, input_tensor_names, output_tensor_names ) self._concrete_func._signature = [ tf.TensorSpec(shape=spec.shape, dtype=spec.dtype, name=name) for name, spec in self._model.inputs.items() ] return self def __exit__(self, exc_type, exc_value, traceback): self._concrete_func = None tf.compat.v1.reset_default_graph() def __call__(self, x: Dict[str, object]): x = tf.nest.map_structure(tf.convert_to_tensor, x) y_pred = self._concrete_func(**x) output_struct = {name: spec.name for name, spec in self._model.outputs.items()} y_pred = tf.nest.map_structure(lambda t: t.numpy(), y_pred) y_pred = tf.nest.pack_sequence_as(output_struct, y_pred) return y_pred class TFSavedModelSaver(BaseSaver): def 
save(self, model: Model, model_path: Union[str, Path]) -> None: if isinstance(model_path, Path): model_path = model_path.as_posix() session_config = create_session_config(allow_growth=True) with tf.compat.v1.Session(config=session_config) as sess: tf.import_graph_def(model.handle, name="") is_func = is_function(sess.graph) if not is_func: infer_shape(sess.graph, {}) inputs = {name: sess.graph.get_tensor_by_name(spec.name) for name, spec in model.inputs.items()} outputs = {name: sess.graph.get_tensor_by_name(spec.name) for name, spec in model.outputs.items()} def _ensure_shape(tensors_dict, tensors_specs): for name, tensor in tensors_dict.items(): if tensor.shape.rank is None: tensor.set_shape(tensors_specs[name].shape) return tensors_dict inputs = _ensure_shape(inputs, model.inputs) outputs = _ensure_shape(outputs, model.outputs) LOGGER.info(inputs) LOGGER.info(outputs) tf.compat.v1.saved_model.simple_save(sess, model_path, inputs, outputs, legacy_init_op=None) def handle_tensor_specs( graph_def, inputs: Dict[str, str], outputs: Dict[str, str] ) -> Tuple[Dict[str, TensorSpec], Dict[str, TensorSpec]]: session_config = tf.compat.v1.ConfigProto(graph_options=tf.compat.v1.GraphOptions(infer_shapes=True)) tf.compat.v1.reset_default_graph() with tf.compat.v1.Session(config=session_config) as sess: tf.import_graph_def(graph_def, name="") def _get_spec(tensors_dict): tensors_dict = {name: sess.graph.get_tensor_by_name(tname) for name, tname in tensors_dict.items()} return {name: tensor2tensor_spec(tensor) for name, tensor in tensors_dict.items()} inputs = _get_spec(inputs) outputs = _get_spec(outputs) tf.compat.v1.reset_default_graph() return inputs, outputs def tensor2tensor_spec(tensor): shape = tuple([s.value if hasattr(s, "value") else s for s in tensor.shape]) return TensorSpec(tensor.name, tensor.dtype.name, shape) loaders.register_extension(Format.TF_ESTIMATOR.value, TFEstimatorLoader) loaders.register_extension(Format.TF_KERAS.value, TFKerasLoader) loaders.register_extension(Format.TF_SAVEDMODEL.value, TFSavedModelLoader) loaders.register_extension(Format.TF_TRT.value, TFSavedModelLoader) converters.register_extension(f"{Format.TF_ESTIMATOR.value}--{Format.TF_SAVEDMODEL.value}", None) converters.register_extension(f"{Format.TF_KERAS.value}--{Format.TF_SAVEDMODEL.value}", None) converters.register_extension(f"{Format.TF_SAVEDMODEL.value}--{Format.TF_SAVEDMODEL.value}", None) converters.register_extension(f"{Format.TF_ESTIMATOR.value}--{Format.TF_TRT.value}", TFTRTConverter) converters.register_extension(f"{Format.TF_KERAS.value}--{Format.TF_TRT.value}", TFTRTConverter) converters.register_extension(f"{Format.TF_SAVEDMODEL.value}--{Format.TF_TRT.value}", TFTRTConverter) savers.register_extension(Format.TF_SAVEDMODEL.value, TFSavedModelSaver) savers.register_extension(Format.TF_TRT.value, TFSavedModelSaver) runners.register_extension(Format.TF_ESTIMATOR.value, TFRunner) runners.register_extension(Format.TF_KERAS.value, TFRunner) runners.register_extension(Format.TF_SAVEDMODEL.value, TFRunner) runners.register_extension(Format.TF_TRT.value, TFRunner)
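A minimal usage sketch, not from the file, wiring a loader to a runner: the SavedModel path and the input tensor name and shape are hypothetical, while the classes and their entry points (load, init_inference, the context-manager call) come from the code above.

import numpy as np

loader = TFSavedModelLoader()
model = loader.load("workspace/model.savedmodel")   # hypothetical SavedModel path

runner = TFRunner()
with runner.init_inference(model=model) as session:
    # One batch keyed by logical input name; the name and shape are placeholders.
    outputs = session({"input_1": np.zeros((1, 224, 224, 3), dtype=np.float32)})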
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/deployment_toolkit/perf_analyzer
perf_analyzer
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib

# method from PEP-366 to support relative import in executed modules
if __package__ is None:
    __package__ = pathlib.Path(__file__).parent.name

from .perf_analyzer import PerfAnalyzer  # noqa: F401
from .perf_config import PerfAnalyzerConfig  # noqa: F401
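As the noqa: F401 markers suggest, the module re-exports both classes so callers can import them from the package root; a sketch, where the absolute package path is a placeholder that depends on where the toolkit is vendored:

from deployment_toolkit.perf_analyzer import PerfAnalyzer, PerfAnalyzerConfig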
TensorFlow/Detection/SSD/models/research/object_detection/core
core
balanced_positive_negative_sampler_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.balanced_positive_negative_sampler.""" import numpy as np import tensorflow as tf from object_detection.core import balanced_positive_negative_sampler from object_detection.utils import test_case class BalancedPositiveNegativeSamplerTest(test_case.TestCase): def test_subsample_all_examples_dynamic(self): numpy_labels = np.random.permutation(300) indicator = tf.constant(np.ones(300) == 1) numpy_labels = (numpy_labels - 200) > 0 labels = tf.constant(numpy_labels) sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) is_sampled = sampler.subsample(indicator, 64, labels) with self.test_session() as sess: is_sampled = sess.run(is_sampled) self.assertTrue(sum(is_sampled) == 64) self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) == 32) self.assertTrue(sum(np.logical_and( np.logical_not(numpy_labels), is_sampled)) == 32) def test_subsample_all_examples_static(self): numpy_labels = np.random.permutation(300) indicator = np.array(np.ones(300) == 1, np.bool) numpy_labels = (numpy_labels - 200) > 0 labels = np.array(numpy_labels, np.bool) def graph_fn(indicator, labels): sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( is_static=True)) return sampler.subsample(indicator, 64, labels) is_sampled = self.execute(graph_fn, [indicator, labels]) self.assertTrue(sum(is_sampled) == 64) self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) == 32) self.assertTrue(sum(np.logical_and( np.logical_not(numpy_labels), is_sampled)) == 32) def test_subsample_selection_dynamic(self): # Test random sampling when only some examples can be sampled: # 100 samples, 20 positives, 10 positives cannot be sampled numpy_labels = np.arange(100) numpy_indicator = numpy_labels < 90 indicator = tf.constant(numpy_indicator) numpy_labels = (numpy_labels - 80) >= 0 labels = tf.constant(numpy_labels) sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) is_sampled = sampler.subsample(indicator, 64, labels) with self.test_session() as sess: is_sampled = sess.run(is_sampled) self.assertTrue(sum(is_sampled) == 64) self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) == 10) self.assertTrue(sum(np.logical_and( np.logical_not(numpy_labels), is_sampled)) == 54) self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator)) def test_subsample_selection_static(self): # Test random sampling when only some examples can be sampled: # 100 samples, 20 positives, 10 positives cannot be sampled. 
numpy_labels = np.arange(100) numpy_indicator = numpy_labels < 90 indicator = np.array(numpy_indicator, np.bool) numpy_labels = (numpy_labels - 80) >= 0 labels = np.array(numpy_labels, np.bool) def graph_fn(indicator, labels): sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( is_static=True)) return sampler.subsample(indicator, 64, labels) is_sampled = self.execute(graph_fn, [indicator, labels]) self.assertTrue(sum(is_sampled) == 64) self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) == 10) self.assertTrue(sum(np.logical_and( np.logical_not(numpy_labels), is_sampled)) == 54) self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator)) def test_subsample_selection_larger_batch_size_dynamic(self): # Test random sampling when total number of examples that can be sampled are # less than batch size: # 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64. numpy_labels = np.arange(100) numpy_indicator = numpy_labels < 60 indicator = tf.constant(numpy_indicator) numpy_labels = (numpy_labels - 50) >= 0 labels = tf.constant(numpy_labels) sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) is_sampled = sampler.subsample(indicator, 64, labels) with self.test_session() as sess: is_sampled = sess.run(is_sampled) self.assertTrue(sum(is_sampled) == 60) self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) == 10) self.assertTrue( sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)) == 50) self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator)) def test_subsample_selection_larger_batch_size_static(self): # Test random sampling when total number of examples that can be sampled are # less than batch size: # 100 samples, 50 positives, 40 positives cannot be sampled, batch size 64. # It should still return 64 samples, with 4 of them that couldn't have been # sampled. numpy_labels = np.arange(100) numpy_indicator = numpy_labels < 60 indicator = np.array(numpy_indicator, np.bool) numpy_labels = (numpy_labels - 50) >= 0 labels = np.array(numpy_labels, np.bool) def graph_fn(indicator, labels): sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler( is_static=True)) return sampler.subsample(indicator, 64, labels) is_sampled = self.execute(graph_fn, [indicator, labels]) self.assertTrue(sum(is_sampled) == 64) self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) >= 10) self.assertTrue( sum(np.logical_and(np.logical_not(numpy_labels), is_sampled)) >= 50) self.assertTrue(sum(np.logical_and(is_sampled, numpy_indicator)) == 60) def test_subsample_selection_no_batch_size(self): # Test random sampling when only some examples can be sampled: # 1000 samples, 6 positives (5 can be sampled). numpy_labels = np.arange(1000) numpy_indicator = numpy_labels < 999 indicator = tf.constant(numpy_indicator) numpy_labels = (numpy_labels - 994) >= 0 labels = tf.constant(numpy_labels) sampler = (balanced_positive_negative_sampler. 
BalancedPositiveNegativeSampler(0.01)) is_sampled = sampler.subsample(indicator, None, labels) with self.test_session() as sess: is_sampled = sess.run(is_sampled) self.assertTrue(sum(is_sampled) == 500) self.assertTrue(sum(np.logical_and(numpy_labels, is_sampled)) == 5) self.assertTrue(sum(np.logical_and( np.logical_not(numpy_labels), is_sampled)) == 495) self.assertAllEqual(is_sampled, np.logical_and(is_sampled, numpy_indicator)) def test_subsample_selection_no_batch_size_static(self): labels = tf.constant([[True, False, False]]) indicator = tf.constant([True, False, True]) sampler = ( balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()) with self.assertRaises(ValueError): sampler.subsample(indicator, None, labels) def test_raises_error_with_incorrect_label_shape(self): labels = tf.constant([[True, False, False]]) indicator = tf.constant([True, False, True]) sampler = (balanced_positive_negative_sampler. BalancedPositiveNegativeSampler()) with self.assertRaises(ValueError): sampler.subsample(indicator, 64, labels) def test_raises_error_with_incorrect_indicator_shape(self): labels = tf.constant([True, False, False]) indicator = tf.constant([[True, False, True]]) sampler = (balanced_positive_negative_sampler. BalancedPositiveNegativeSampler()) with self.assertRaises(ValueError): sampler.subsample(indicator, 64, labels) if __name__ == '__main__': tf.test.main()
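A standalone sketch of the sampler outside the test harness, assuming the same TF1-style session API the tests use; with the default constructor the tests above verify a 50/50 positive/negative balance.

import numpy as np
import tensorflow as tf
from object_detection.core import balanced_positive_negative_sampler

labels = tf.constant(np.arange(300) >= 200)          # 100 positives, 200 negatives
indicator = tf.constant(np.ones(300, dtype=bool))    # every example is eligible

sampler = balanced_positive_negative_sampler.BalancedPositiveNegativeSampler()
is_sampled = sampler.subsample(indicator, 64, labels)

with tf.Session() as sess:
    mask = sess.run(is_sampled)   # boolean mask with 32 positives and 32 negatives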
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/losses
losses
weighted_sparse_categorical_crossentropy_test
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for masked LM loss.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling import networks from official.nlp.modeling.losses import weighted_sparse_categorical_crossentropy @keras_parameterized.run_all_keras_modes class ClassificationLossTest(keras_parameterized.TestCase): def create_lm_model(self, vocab_size, sequence_length, hidden_size, num_predictions, output="predictions"): # First, create a transformer stack that we can use to get the LM's # vocabulary weight. xformer_stack = networks.TransformerEncoder( vocab_size=vocab_size, num_layers=1, sequence_length=sequence_length, hidden_size=hidden_size, num_attention_heads=4, ) word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) lm_outputs, _ = xformer_stack([word_ids, mask, type_ids]) # Create a maskedLM from the transformer stack. test_network = networks.MaskedLM( num_predictions=num_predictions, input_width=lm_outputs.shape[-1], source_network=xformer_stack, output=output) # Create a model from the masked LM layer. lm_input_tensor = tf.keras.Input(shape=(sequence_length, hidden_size)) masked_lm_positions = tf.keras.Input( shape=(num_predictions,), dtype=tf.int32) output = test_network([lm_input_tensor, masked_lm_positions]) return tf.keras.Model([lm_input_tensor, masked_lm_positions], output) def create_classification_model(self, input_width, num_classes): test_object = networks.Classification( input_width=input_width, num_classes=num_classes) # Create a 2-dimensional input (the first dimension is implicit). pooled_data = tf.keras.Input(shape=(input_width,), dtype=tf.float32) output = test_object(pooled_data) return tf.keras.Model(pooled_data, output) def test_per_example_loss_3d_input(self): """Test per-example loss with a 3-dimensional input, from a masked LM.""" vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 model = self.create_lm_model( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Get the output of the masked LM. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) output_data = model.predict([lm_input_data, masked_position_data]) # Calculate per-example loss. 
labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels) # Per-example loss data should have one value per prediction, and those # values shouldn't be zero in this case (as we're using random data). expected_shape = [batch_size, num_predictions] self.assertEqual(expected_shape, per_example_loss_data.shape.as_list()) self.assertNotAllClose( tf.zeros_like(per_example_loss_data), per_example_loss_data) def test_per_example_loss_2d_input(self): """Test per-example loss with a 2-d input, from a classifier.""" input_width = 512 num_classes = 10 model = self.create_classification_model(input_width, num_classes) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) output_data = model.predict(input_data) # Calculate per example loss. labels = np.random.randint(num_classes, size=(batch_size)) per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels) # Per-example loss data should have one value per batch item, and those # values shouldn't be zero in this case (as we're using random data). self.assertEqual([batch_size], per_example_loss_data.shape.as_list()) self.assertNotAllClose( tf.zeros_like(per_example_loss_data), per_example_loss_data) def test_per_example_loss_weights_3d_input(self): """Test weighted per-example loss with a 3-d input, from a masked LM.""" vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 model = self.create_lm_model( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Get the output of the masked LM. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) output_data = model.predict([lm_input_data, masked_position_data]) # Calculate per-example loss with weights. labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) weights = np.random.randint(2, size=(batch_size, num_predictions)) per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels, weights=weights) # Weighted per-example loss data should be equivalent to multiplying the # loss tensor by the weights tensor. expected_weighted_loss = per_example_loss_data * weights self.assertAllClose(expected_weighted_loss, per_example_loss_data) def test_per_example_loss_weights_2d_input(self): """Test weighted per-example loss with a 2-d input, from a classifier.""" input_width = 512 num_classes = 10 model = self.create_classification_model(input_width, num_classes) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) output_data = model.predict(input_data) # Calculate per-example loss with weights. labels = np.random.randint(num_classes, size=(batch_size)) weights = np.random.randint(2, size=(batch_size)) per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels, weights=weights) # Weighted per-example loss data should be equivalent to multiplying the # loss tensor by the weights tensor. 
expected_weighted_loss = per_example_loss_data * weights self.assertAllClose(expected_weighted_loss, per_example_loss_data) def test_loss_3d_input(self): """Test overall loss with a 3-dimensional input, from a masked LM.""" vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 model = self.create_lm_model( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Get the output of the masked LM. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) output_data = model.predict([lm_input_data, masked_position_data]) # Calculate loss. labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) weights = np.random.randint(2, size=(batch_size, num_predictions)) per_example_loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=weights) # Total loss data should have one value, and that value shouldn't be zero # in this case (as we're using random data). expected_shape = [] # Scalar self.assertEqual(expected_shape, per_example_loss_data.shape.as_list()) self.assertNotAllClose( tf.zeros_like(per_example_loss_data), per_example_loss_data) def test_loss_2d_input(self): """Test overall loss with a 2-d input, from a classifier.""" input_width = 512 num_classes = 10 model = self.create_classification_model(input_width, num_classes) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) output_data = model.predict(input_data) # Calculate per example loss. labels = np.random.randint(num_classes, size=(batch_size)) loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels) # Loss data should have one value only, and that value shouldn't be zero in # this case (as we're using random data). self.assertNotAllClose(0, loss_data) def test_loss_weights_3d_input(self): """Test masked loss with a 3-dimensional input, from a masked LM.""" vocab_size = 100 sequence_length = 32 hidden_size = 64 num_predictions = 21 model = self.create_lm_model( vocab_size=vocab_size, sequence_length=sequence_length, hidden_size=hidden_size, num_predictions=num_predictions) # Get the output of the masked LM. batch_size = 3 lm_input_data = 10 * np.random.random_sample( (batch_size, sequence_length, hidden_size)) masked_position_data = np.random.randint( 2, size=(batch_size, num_predictions)) output_data = model.predict([lm_input_data, masked_position_data]) # Calculate a fully masked weight tensor. This should give a loss of zero. labels = np.random.randint(vocab_size, size=(batch_size, num_predictions)) null_weights = np.zeros((batch_size, num_predictions)) weighted_loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=null_weights) # Because the tensor is fully masked, the loss should be 0. self.assertAllClose(0, weighted_loss_data) def test_loss_weights_2d_input(self): """Test masked loss with a 2-d input, from a classifier.""" input_width = 512 num_classes = 10 model = self.create_classification_model(input_width, num_classes) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample((batch_size, input_width)) output_data = model.predict(input_data) # Calculate a fully masked weight tensor. This should give a loss of zero. 
labels = np.random.randint(num_classes, size=(batch_size)) null_weights = np.zeros((batch_size)) weighted_loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=null_weights) # Because the tensor is fully masked, the loss should be 0. self.assertAllClose(0, weighted_loss_data) def test_mismatched_predictions_and_labels_ranks_squeezes(self): """Test that the loss asserts when rank(predictions)-1 != rank(labels).""" batch_size = 3 output_data = np.random.random_sample((batch_size, 10)) labels = np.random.randint(10, size=(batch_size, 1)) # All that this test tests is that the squeeze is successful. _ = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels) def test_mismatched_weights_and_labels_ranks_fail(self): """Test that the loss asserts when rank(predictions) != rank(labels).""" batch_size = 3 output_data = np.random.random_sample((batch_size, 10, 15)) labels = np.random.randint(10, size=(batch_size, 10)) weights = np.random.randint(2, size=(batch_size)) with self.assertRaisesRegex(RuntimeError, ".*of the same rank.*"): _ = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels, weights=weights) with self.assertRaisesRegex(RuntimeError, ".*of the same rank.*"): _ = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=weights) def test_tf_tensor_inputs(self): """Test that tf.Tensors can be used as inputs to the loss function.""" batch_size = 3 output_data = tf.convert_to_tensor( np.random.random_sample((batch_size, 10, 15))) labels = tf.convert_to_tensor(np.random.randint(10, size=(batch_size, 10))) weights = tf.convert_to_tensor(np.random.randint(2, size=(batch_size, 10))) # We're not trying to validate numerical correctness, just ensure that # we can in fact pass tensors to these functions without causing runtime # errors from the shape checking code. _ = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels, weights=weights) _ = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=weights) def test_legacy_lm_loss_compatibility(self): """Test to validate computational correctness during refactors.""" # This is the empirical output of a masked LM with the following parameters: # batch_size = 3 # vocab_size = 5 # sequence_length = 4 # num_predictions = 2 output_data = np.array( [[[-2.5286622, -1.0963473, -1.4925185, -2.4451098, -1.2923571], [-2.7117882, -1.1205841, -4.02187, -0.9966936, -1.5119683]], [[-2.5379114, -0.82479054, -2.287932, -1.3747153, -2.053741], [-2.5379114, -0.82479054, -2.287932, -1.3747153, -2.053741]], [[-2.7760355, -1.8219438, -3.0924666, -1.0779881, -0.9407509], [-2.7760355, -1.8219438, -3.0924666, -1.0779881, -0.9407509]]]) labels = np.array([[4, 0], [2, 2], [2, 1]]) # Validate that per_example loss calculations are the same. per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels) expected_per_example_loss_data = [[1.2923571, 2.7117882], [2.287932, 2.287932], [3.0924666, 1.8219438]] self.assertAllClose(expected_per_example_loss_data, per_example_loss_data) # Validate that overall loss calculations are the same. 
weights = np.array([[1, 0], [0, 0], [0, 0]]) loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=weights) expected_loss_data = 1.2923441 self.assertAllClose(expected_loss_data, loss_data) def test_legacy_classification_loss_compatibility(self): """Test to validate computational correctness during refactors.""" # This is the empirical output of a classifier with the following params: # batch_size = 2 # num_classes = 3 output_data = np.array([[-1.6094601e-03, -1.0966038e+01, -6.4434357e+00], [-1.6975292e-03, -6.4009643e+00, -1.0226612e+01]]) labels = np.array([2, 1]) # Validate that per_example loss calculations are the same. per_example_loss_data = weighted_sparse_categorical_crossentropy.per_example_loss( predictions=output_data, labels=labels) expected_per_example_loss_data = [6.4434357, 6.4009643] self.assertAllClose(expected_per_example_loss_data, per_example_loss_data) # Validate that overall loss calculations are the same. weights = None loss_data = weighted_sparse_categorical_crossentropy.loss( predictions=output_data, labels=labels, weights=weights) expected_loss_data = 6.4222 self.assertAllClose(expected_loss_data, loss_data) if __name__ == "__main__": tf.test.main()
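The legacy-compatibility tests above fix the semantics of the two entry points: predictions are log-probabilities, per_example_loss returns the negative log-probability of each label, and loss is a weighted mean over those values. A compact sketch of that computation in plain TensorFlow (illustrative only; the small denominator epsilon is an assumption made to guard against all-zero weights):

import tensorflow as tf

def per_example_loss_sketch(predictions, labels):
    # predictions: log-probabilities of shape [..., num_classes]; labels: integer ids [...].
    predictions = tf.convert_to_tensor(predictions)
    labels = tf.convert_to_tensor(labels)
    return -tf.gather(predictions, labels, axis=-1, batch_dims=labels.shape.rank)

def loss_sketch(predictions, labels, weights=None):
    per_example = per_example_loss_sketch(predictions, labels)
    if weights is None:
        return tf.reduce_mean(per_example)
    weights = tf.cast(weights, per_example.dtype)
    return tf.reduce_sum(per_example * weights) / (tf.reduce_sum(weights) + 1e-5)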
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner
runner
runner
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import pathlib import signal import sys from typing import List, Type # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .config import Config from .exceptions import RunnerException from .executor import Executor from .finalizer import Finalizer from .logger import LOGGER, log_format from .maintainer import Maintainer from .pipeline import Pipeline from .preparer import Preparer from .triton import Triton class Runner: """ Runner class. Main entrypoint to performing task and experiments """ WORKSPACE = pathlib.Path.cwd() EXECUTOR_WORKSPACE = WORKSPACE / "runner_workspace" def __init__( self, pipeline: Pipeline, config: Config, executor_cls: Type[Executor], maintainer_cls: Type[Maintainer], preparer_cls: Type[Preparer], finalizer_cls: Type[Finalizer], devices: List[str] = None, log_level: int = logging.INFO, ): self._pipeline = pipeline self._config = config self._pipeline = pipeline self._config = config self._preparer = preparer_cls() self._finalizer = finalizer_cls() self._devices = devices or ["0"] self._log_level = log_level self._logs_dir = self.EXECUTOR_WORKSPACE / "logs" self._log_file_path = self._logs_dir / "runner.log" self._maintainer = maintainer_cls() self._executor = executor_cls( workspace=self.EXECUTOR_WORKSPACE, maintainer=self._maintainer, pipeline=pipeline, devices=devices, ) signal.signal(signal.SIGINT, self._catch) self._logs_dir.mkdir(parents=True, exist_ok=True) def start(self) -> None: """ Start runner Returns: None """ self._setup_logger() task = self._preparer.exec( workspace=self.EXECUTOR_WORKSPACE, config=self._config, pipeline=self._pipeline, logs_dir=self._logs_dir, maintainer=self._maintainer, triton=Triton(), ) results = [] try: for result in self._executor.start(task): results.append(result) except RunnerException as e: LOGGER.error(f"Error running task: {str(e)}") finally: self._executor.stop() self._finalizer.exec(workspace=self.EXECUTOR_WORKSPACE, task=task, results=results) def _catch(self, signum, frame): """ SIGINT catcher. Stops executor on any sigterm. Args: signum: signal id frame: signal frame """ self._executor.stop() sys.exit(0) def _setup_logger(self) -> None: """ Add file handle for logger Returns: None """ file = logging.FileHandler(self._log_file_path) formatter = logging.Formatter(log_format) file.setFormatter(formatter) LOGGER.addHandler(file) LOGGER.setLevel(level=self._log_level) LOGGER.initialize(file_path=self._log_file_path)
PyTorch/LanguageModeling/Transformer-XL/pytorch/scripts/tests
tests
train_long
#!/bin/bash # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e REPO_DIR=${REPO_DIR:-"/workspace/transformer-xl/pytorch/"} REFERENCE_FILE=$REPO_DIR/scripts/tests/reference_training_throughput MATH=$1 if [[ ${MATH} != "fp16" && ${MATH} != "fp32" ]]; then echo "Unsupported option for MATH, use either 'fp16' or 'fp32'" exit 1 fi PERF_TOLERANCE=0.9 GPU_NAME=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader |uniq) echo 'GPU_NAME:' "${GPU_NAME}" GPU_COUNT=$(nvidia-smi --query-gpu=gpu_name --format=csv,noheader |wc -l) echo 'GPU_COUNT:' "${GPU_COUNT}" if (( GPU_COUNT == 16 )); then SYSTEM=dgx2 else SYSTEM=dgx1 fi REFERENCE_PERF=$(grep "${MATH},${GPU_COUNT},${GPU_NAME}" \ ${REFERENCE_FILE} | \cut -f 4 -d ',') if [ -z "${REFERENCE_PERF}" ]; then echo "WARNING: COULD NOT FIND REFERENCE PERFORMANCE FOR EXECUTED CONFIG" TARGET_PERF='' else PERF_THRESHOLD=$(awk 'BEGIN {print ('"${REFERENCE_PERF}"' * '"${PERF_TOLERANCE}"')}') TARGET_PERF='--target_throughput '${PERF_THRESHOLD} fi cd $REPO_DIR bash run_wt103_base.sh train "${GPU_COUNT}" \ --config ${SYSTEM}_${GPU_COUNT}gpu_${MATH} \ --debug \ --max_step 30000 \ --max_step_scheduler 40000 \ --target_perplexity 24.2 \ --log_interval 1 \ ${TARGET_PERF}
PyTorch/Translation/Transformer/fairseq/modules
modules
learned_positional_embedding
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import torch.nn as nn from fairseq import utils class LearnedPositionalEmbedding(nn.Embedding): """This module learns positional embeddings up to a fixed maximum size. Padding symbols are ignored, but it is necessary to specify whether padding is added on the left side (left_pad=True) or right side (left_pad=False). """ def __init__(self, num_embeddings, embedding_dim, padding_idx, left_pad): super().__init__(num_embeddings, embedding_dim, padding_idx) self.left_pad = left_pad def forward(self, input, incremental_state=None): """Input is expected to be of size [bsz x seqlen].""" if incremental_state is not None: # positions is the same for every token when decoding a single step positions = input.data.new(1, 1).fill_(self.padding_idx + input.size(1)) else: positions = utils.make_positions(input.data, self.padding_idx, self.left_pad) return super().forward(positions)
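The embedding above expects position ids rather than raw token indices: non-pad tokens are numbered from padding_idx + 1 onward and pad slots keep padding_idx, so they map to the padding row. A self-contained sketch of that layout for right-padded batches (utils.make_positions, which also handles left padding, is not shown in this file):

import torch

def make_positions_sketch(tokens, padding_idx):
    # Right-padded batches only; left padding needs an additional shift.
    mask = tokens.ne(padding_idx)
    positions = torch.cumsum(mask.long(), dim=1) + padding_idx
    return positions.masked_fill(~mask, padding_idx)

tokens = torch.tensor([[5, 6, 7, 1, 1]])              # padding_idx = 1
print(make_positions_sketch(tokens, padding_idx=1))   # tensor([[2, 3, 4, 1, 1]])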
PyTorch/Detection/Efficientdet/effdet/config
config
train_config
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from omegaconf import OmegaConf def default_detection_train_config(): # FIXME currently using args for train config, will revisit, perhaps move to Hydra h = OmegaConf.create() # dataset h.skip_crowd_during_training = True # augmentation h.input_rand_hflip = True h.train_scale_min = 0.1 h.train_scale_max = 2.0 h.autoaugment_policy = None # optimization h.momentum = 0.9 h.learning_rate = 0.08 h.lr_warmup_init = 0.008 h.lr_warmup_epoch = 1.0 h.first_lr_drop_epoch = 200.0 h.second_lr_drop_epoch = 250.0 h.clip_gradients_norm = 10.0 h.num_epochs = 300 # regularization l2 loss. h.weight_decay = 4e-5 h.lr_decay_method = 'cosine' h.moving_average_decay = 0.9998 h.ckpt_var_scope = None return h
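A short usage sketch for the helper above, showing how the returned OmegaConf node can be overridden before training (the override values are arbitrary examples):

from omegaconf import OmegaConf

h = default_detection_train_config()
# Apply CLI-style overrides on top of the defaults.
h.merge_with(OmegaConf.from_dotlist(["learning_rate=0.12", "num_epochs=100"]))
print(OmegaConf.to_yaml(h))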
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda
cuda
vision
/** * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #pragma once #include <torch/extension.h> at::Tensor ROIAlign_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio, const bool is_nhwc); at::Tensor ROIAlign_backward_cuda(const at::Tensor& grad, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio, const bool is_nhwc); std::tuple<at::Tensor, at::Tensor> ROIPool_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width); at::Tensor ROIPool_backward_cuda(const at::Tensor& grad, const at::Tensor& input, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width); at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh); at::Tensor compute_flow_cuda(const at::Tensor& boxes, const int height, const int width); at::Tensor generate_mask_targets_cuda(at::Tensor dense_vector, const std::vector<std::vector<at::Tensor>> polygons, const at::Tensor anchors, const int mask_size); at::Tensor box_iou_cuda(at::Tensor box1, at::Tensor box2); std::vector<at::Tensor> box_encode_cuda(at::Tensor boxes, at::Tensor anchors, float wx, float wy, float ww, float wh); at::Tensor match_proposals_cuda(at::Tensor match_quality_matrix, bool include_low_quality_matches, float low_th, float high_th);
TensorFlow2/LanguageModeling/BERT/data
data
TextSharding
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict from itertools import islice import multiprocessing import os import statistics class Sharding: def __init__(self, input_files, output_name_prefix, n_training_shards, n_test_shards, fraction_test_set): assert len(input_files) > 0, 'The input file list must contain at least one file.' assert n_training_shards > 0, 'There must be at least one output shard.' assert n_test_shards > 0, 'There must be at least one output shard.' self.n_training_shards = n_training_shards self.n_test_shards = n_test_shards self.fraction_test_set = fraction_test_set self.input_files = input_files self.output_name_prefix = output_name_prefix self.output_training_identifier = '_training' self.output_test_identifier = '_test' self.output_file_extension = '.txt' self.articles = {} # key: integer identifier, value: list of articles self.sentences = {} # key: integer identifier, value: list of sentences self.output_training_files = {} # key: filename, value: list of articles to go into file self.output_test_files = {} # key: filename, value: list of articles to go into file self.init_output_files() # Remember, the input files contain one article per line (the whitespace check is to skip extraneous blank lines) def load_articles(self): print('Start: Loading Articles') global_article_count = 0 for input_file in self.input_files: print('input file:', input_file) with open(input_file, mode='r', newline='\n') as f: for i, line in enumerate(f): if line.strip(): self.articles[global_article_count] = line.rstrip() global_article_count += 1 print('End: Loading Articles: There are', len(self.articles), 'articles.') def segment_articles_into_sentences(self, segmenter): print('Start: Sentence Segmentation') if len(self.articles) == 0: self.load_articles() assert len(self.articles) != 0, 'Please check that input files are present and contain data.' 
# TODO: WIP: multiprocessing (create independent ranges and spawn processes) use_multiprocessing = 'serial' def chunks(data, size=len(self.articles)): it = iter(data) for i in range(0, len(data), size): yield {k: data[k] for k in islice(it, size)} if use_multiprocessing == 'manager': manager = multiprocessing.Manager() return_dict = manager.dict() jobs = [] n_processes = 7 # in addition to the main process, total = n_proc+1 def work(articles, return_dict): sentences = {} for i, article in enumerate(articles): sentences[i] = segmenter.segment_string(articles[article]) if i % 5000 == 0: print('Segmenting article', i) return_dict.update(sentences) for item in chunks(self.articles, len(self.articles)): p = multiprocessing.Process(target=work, args=(item, return_dict)) # Busy wait while len(jobs) >= n_processes: pass jobs.append(p) p.start() for proc in jobs: proc.join() elif use_multiprocessing == 'queue': work_queue = multiprocessing.Queue() jobs = [] for item in chunks(self.articles, len(self.articles)): pass else: # serial option for i, article in enumerate(self.articles): self.sentences[i] = segmenter.segment_string(self.articles[article]) if i % 5000 == 0: print('Segmenting article', i) print('End: Sentence Segmentation') def init_output_files(self): print('Start: Init Output Files') assert len(self.output_training_files) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.' assert len(self.output_test_files) == 0, 'Internal storage self.output_files already contains data. This function is intended to be used by the constructor only.' for i in range(self.n_training_shards): name = self.output_name_prefix + self.output_training_identifier + '_' + str(i) + self.output_file_extension self.output_training_files[name] = [] for i in range(self.n_test_shards): name = self.output_name_prefix + self.output_test_identifier + '_' + str(i) + self.output_file_extension self.output_test_files[name] = [] print('End: Init Output Files') def get_sentences_per_shard(self, shard): result = 0 for article_id in shard: result += len(self.sentences[article_id]) return result def distribute_articles_over_shards(self): print('Start: Distribute Articles Over Shards') assert len(self.articles) >= self.n_training_shards + self.n_test_shards, 'There are fewer articles than shards. Please add more data or reduce the number of shards requested.' 
# Create dictionary with - key: sentence count per article, value: article id number sentence_counts = defaultdict(lambda: []) max_sentences = 0 total_sentences = 0 for article_id in self.sentences: current_length = len(self.sentences[article_id]) sentence_counts[current_length].append(article_id) max_sentences = max(max_sentences, current_length) total_sentences += current_length n_sentences_assigned_to_training = int((1 - self.fraction_test_set) * total_sentences) nominal_sentences_per_training_shard = n_sentences_assigned_to_training // self.n_training_shards nominal_sentences_per_test_shard = (total_sentences - n_sentences_assigned_to_training) // self.n_test_shards consumed_article_set = set({}) unused_article_set = set(self.articles.keys()) # Make first pass and add one article worth of lines per file for file in self.output_training_files: current_article_id = sentence_counts[max_sentences][-1] sentence_counts[max_sentences].pop(-1) self.output_training_files[file].append(current_article_id) consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) # Maintain the max sentence count while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 if len(self.sentences[current_article_id]) > nominal_sentences_per_training_shard: nominal_sentences_per_training_shard = len(self.sentences[current_article_id]) print('Warning: A single article contains more than the nominal number of sentences per training shard.') for file in self.output_test_files: current_article_id = sentence_counts[max_sentences][-1] sentence_counts[max_sentences].pop(-1) self.output_test_files[file].append(current_article_id) consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) # Maintain the max sentence count while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 if len(self.sentences[current_article_id]) > nominal_sentences_per_test_shard: nominal_sentences_per_test_shard = len(self.sentences[current_article_id]) print('Warning: A single article contains more than the nominal number of sentences per test shard.') training_counts = [] test_counts = [] for shard in self.output_training_files: training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard])) for shard in self.output_test_files: test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard])) training_median = statistics.median(training_counts) test_median = statistics.median(test_counts) # Make subsequent passes over files to find articles to add without going over limit history_remaining = [] n_history_remaining = 4 while len(consumed_article_set) < len(self.articles): for fidx, file in enumerate(self.output_training_files): nominal_next_article_size = min(nominal_sentences_per_training_shard - training_counts[fidx], max_sentences) # Maintain the max sentence count while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0: nominal_next_article_size -= 1 if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or training_counts[fidx] > training_median: continue # skip adding to this file, will come back later if no file can accept unused articles current_article_id = sentence_counts[nominal_next_article_size][-1] sentence_counts[nominal_next_article_size].pop(-1) self.output_training_files[file].append(current_article_id) 
consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) for fidx, file in enumerate(self.output_test_files): nominal_next_article_size = min(nominal_sentences_per_test_shard - test_counts[fidx], max_sentences) # Maintain the max sentence count while len(sentence_counts[max_sentences]) == 0 and max_sentences > 0: max_sentences -= 1 while len(sentence_counts[nominal_next_article_size]) == 0 and nominal_next_article_size > 0: nominal_next_article_size -= 1 if nominal_next_article_size not in sentence_counts or nominal_next_article_size == 0 or test_counts[fidx] > test_median: continue # skip adding to this file, will come back later if no file can accept unused articles current_article_id = sentence_counts[nominal_next_article_size][-1] sentence_counts[nominal_next_article_size].pop(-1) self.output_test_files[file].append(current_article_id) consumed_article_set.add(current_article_id) unused_article_set.remove(current_article_id) # If unable to place articles a few times, bump up nominal sizes by fraction until articles get placed if len(history_remaining) == n_history_remaining: history_remaining.pop(0) history_remaining.append(len(unused_article_set)) history_same = True for i in range(1, len(history_remaining)): history_same = history_same and (history_remaining[i-1] == history_remaining[i]) if history_same: nominal_sentences_per_training_shard += 1 # nominal_sentences_per_test_shard += 1 training_counts = [] test_counts = [] for shard in self.output_training_files: training_counts.append(self.get_sentences_per_shard(self.output_training_files[shard])) for shard in self.output_test_files: test_counts.append(self.get_sentences_per_shard(self.output_test_files[shard])) training_median = statistics.median(training_counts) test_median = statistics.median(test_counts) print('Distributing data over shards:', len(unused_article_set), 'articles remaining.') if len(unused_article_set) != 0: print('Warning: Some articles did not make it into output files.') for shard in self.output_training_files: print('Training shard:', self.get_sentences_per_shard(self.output_training_files[shard])) for shard in self.output_test_files: print('Test shard:', self.get_sentences_per_shard(self.output_test_files[shard])) print('End: Distribute Articles Over Shards') def write_shards_to_disk(self): print('Start: Write Shards to Disk') for shard in self.output_training_files: self.write_single_shard(shard, self.output_training_files[shard], 'training') for shard in self.output_test_files: self.write_single_shard(shard, self.output_test_files[shard], 'test') print('End: Write Shards to Disk') def write_single_shard(self, shard_name, shard, split): shard_split = os.path.split(shard_name) shard_name = shard_split[0] + '/' + split + '/' + shard_split[1] with open(shard_name, mode='w', newline='\n') as f: for article_id in shard: for line in self.sentences[article_id]: f.write(line + '\n') f.write('\n') # Line break between articles import nltk nltk.download('punkt') class NLTKSegmenter: def __init__(self): pass def segment_string(self, article): return nltk.tokenize.sent_tokenize(article)
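A minimal end-to-end sketch of the sharding workflow defined above (the input path and shard counts are hypothetical; write_single_shard expects training/ and test/ subdirectories under the output prefix to exist):

segmenter = NLTKSegmenter()
sharding = Sharding(
    input_files=['formatted/wikicorpus_en_one_article_per_line.txt'],  # hypothetical path
    output_name_prefix='sharded/wikicorpus_en',
    n_training_shards=256,
    n_test_shards=256,
    fraction_test_set=0.1,
)
sharding.load_articles()
sharding.segment_articles_into_sentences(segmenter)
sharding.distribute_articles_over_shards()
sharding.write_shards_to_disk()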
PyTorch/Classification/GPUNet/triton/deployment_toolkit
deployment_toolkit
extensions
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import logging import os import re from pathlib import Path from typing import List LOGGER = logging.getLogger(__name__) class ExtensionManager: def __init__(self, name: str): self._name = name self._registry = {} def register_extension(self, extension: str, clazz): already_registered_class = self._registry.get(extension, None) if already_registered_class and already_registered_class.__module__ != clazz.__module__: raise RuntimeError( f"Conflicting extension {self._name}/{extension}; " f"{already_registered_class.__module__}.{already_registered_class.__name__} " f"and " f"{clazz.__module__}.{clazz.__name__}" ) elif already_registered_class is None: clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None" LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}") self._registry[extension] = clazz def get(self, extension): if extension not in self._registry: raise RuntimeError(f"Missing extension {self._name}/{extension}") return self._registry[extension] @property def supported_extensions(self): return list(self._registry) @staticmethod def scan_for_extensions(extension_dirs: List[Path]): register_pattern = r".*\.register_extension\(.*" for extension_dir in extension_dirs: for python_path in extension_dir.rglob("*.py"): if not python_path.is_file(): continue payload = python_path.read_text() if re.findall(register_pattern, payload): import_path = python_path.relative_to(toolkit_root_dir.parent) package = import_path.parent.as_posix().replace(os.sep, ".") package_with_module = f"{package}.{import_path.stem}" spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path) my_module = importlib.util.module_from_spec(spec) my_module.__package__ = package try: spec.loader.exec_module(my_module) # pytype: disable=attribute-error except ModuleNotFoundError as e: LOGGER.error( f"Could not load extensions from {import_path} due to missing python packages; {e}" ) runners = ExtensionManager("runners") loaders = ExtensionManager("loaders") savers = ExtensionManager("savers") toolkit_root_dir = (Path(__file__).parent / "..").resolve() ExtensionManager.scan_for_extensions([toolkit_root_dir])
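A small usage sketch for the registries above (the extension class is hypothetical; in practice scan_for_extensions imports any module under the toolkit that calls register_extension):

class OnnxRunner:
    """Hypothetical runner registered under the 'onnx' extension."""

runners.register_extension("onnx", OnnxRunner)
assert runners.get("onnx") is OnnxRunner
print(runners.supported_extensions)   # ['onnx', ...]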
PyTorch/Recommendation/DLRM/preproc
preproc
run_spark
#!/bin/bash # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ######################################################################### # File Name: run_spark.sh echo "Input mode option: $1" if [ "$1" = "CPU" ] then echo "Run with CPU."; shift ./run_spark_cpu.sh ${@} elif [ "$1" = "GPU" ] then echo "Run with GPU."; shift if [ "$DGX_VERSION" = "DGX-2" ] then ./run_spark_gpu_DGX-2.sh ${@} else ./run_spark_gpu_DGX-A100.sh ${@} fi else echo "Please choose mode (CPU/GPU)."; fi
TensorFlow2/Detection/Efficientdet/scripts/D0
D0
evaluate-TF32-8xA100-80G
#!/bin/bash # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. bs=104 ema=0.999 mkdir -p /tmp/evaluate-TF32-8xA100-80G mpirun -np 8 --allow-run-as-root --bind-to none \ -map-by slot -x LD_LIBRARY_PATH -x PATH \ -mca pml ob1 -mca btl ^openib \ -x CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \ python3 eval.py \ --val_file_pattern=/workspace/coco/val-* \ --val_json_file=/workspace/coco/annotations/instances_val2017.json \ --ckpt_path=${CKPT:-/checkpoints/emackpt-300} \ --batch_size=$bs \ --amp=False \ --hparams="moving_average_decay=$ema" \ 2>&1 | tee /tmp/evaluate-TF32-8xA100-80G/eval.log
PaddlePaddle/LanguageModeling/BERT/scripts/configs
configs
pretrain_config
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. dgxa100-80g_8gpu_amp () { train_batch_size="256" learning_rate="6e-3" precision="amp" num_gpus=8 warmup_proportion="0.2843" train_steps=7038 save_checkpoint_steps=200 create_logfile="false" gradient_accumulation_steps=32 seed=42 job_name="bert_lamb_pretraining" train_batch_size_phase2=32 learning_rate_phase2="4e-3" warmup_proportion_phase2="0.128" train_steps_phase2=1563 gradient_accumulation_steps_phase2=128 DATASET=pretrain/phase1/unbinned/parquet # change this for other datasets DATA_DIR_PHASE1="$BERT_PREP_WORKING_DIR/${DATASET}/" DATASET2=pretrain/phase2/bin_size_64/parquet # change this for other datasets DATA_DIR_PHASE2="$BERT_PREP_WORKING_DIR/${DATASET2}/" CODEDIR=/workspace/bert init_checkpoint="None" VOCAB_FILE=vocab/bert-large-uncased-vocab.txt RESULTS_DIR=$CODEDIR/results CHECKPOINTS_DIR=$RESULTS_DIR wikipedia_source=$BERT_PREP_WORKING_DIR/wikipedia/source/ num_dask_workers=128 num_shards_per_worker=128 num_workers=4 sample_ratio="0.9" phase2_bin_size=64 masking=static BERT_CONFIG=bert_configs/bert-large-uncased.json enable_benchmark="false" benchmark_steps=10 # It takes effect only after the enable_benchmark is set to true benchmark_warmup_steps=10 # It takes effect only after the enable_benchmark is set to true echo $train_batch_size $learning_rate $precision $num_gpus \ $warmup_proportion $train_steps $save_checkpoint_steps \ $create_logfile $gradient_accumulation_steps $seed $job_name \ $train_batch_size_phase2 $learning_rate_phase2 \ $warmup_proportion_phase2 $train_steps_phase2 $gradient_accumulation_steps_phase2 \ $DATA_DIR_PHASE1 $DATA_DIR_PHASE2 $CODEDIR $init_checkpoint \ $wikipedia_source $num_dask_workers $num_shards_per_worker $num_workers \ $sample_ratio $phase2_bin_size $masking \ $BERT_CONFIG $enable_benchmark $benchmark_steps $benchmark_warmup_steps } dgxa100-80g_8gpu_tf32 () { train_batch_size="128" learning_rate="6e-3" precision="tf32" num_gpus=8 warmup_proportion="0.2843" train_steps=7038 save_checkpoint_steps=200 create_logfile="false" gradient_accumulation_steps=64 seed=42 job_name="bert_lamb_pretraining" train_batch_size_phase2=16 learning_rate_phase2="4e-3" warmup_proportion_phase2="0.128" train_steps_phase2=1563 gradient_accumulation_steps_phase2=256 DATASET=pretrain/phase1/unbinned/parquet # change this for other datasets DATA_DIR_PHASE1="$BERT_PREP_WORKING_DIR/${DATASET}/" DATASET2=pretrain/phase2/bin_size_64/parquet # change this for other datasets DATA_DIR_PHASE2="$BERT_PREP_WORKING_DIR/${DATASET2}/" CODEDIR=/workspace/bert init_checkpoint="None" VOCAB_FILE=vocab/bert-large-uncased-vocab.txt RESULTS_DIR=$CODEDIR/results CHECKPOINTS_DIR=$RESULTS_DIR wikipedia_source=$BERT_PREP_WORKING_DIR/wikipedia/source/ num_dask_workers=128 num_shards_per_worker=128 num_workers=4 sample_ratio="0.9" phase2_bin_size=64 masking=static BERT_CONFIG=bert_configs/bert-large-uncased.json enable_benchmark="false" benchmark_steps=10 # It takes effect only after the enable_benchmark 
is set to true benchmark_warmup_steps=10 # It takes effect only after the enable_benchmark is set to true echo $train_batch_size $learning_rate $precision $num_gpus \ $warmup_proportion $train_steps $save_checkpoint_steps \ $create_logfile $gradient_accumulation_steps $seed $job_name \ $train_batch_size_phase2 $learning_rate_phase2 \ $warmup_proportion_phase2 $train_steps_phase2 $gradient_accumulation_steps_phase2 \ $DATA_DIR_PHASE1 $DATA_DIR_PHASE2 $CODEDIR $init_checkpoint \ $wikipedia_source $num_dask_workers $num_shards_per_worker $num_workers \ $sample_ratio $phase2_bin_size $masking \ $BERT_CONFIG $enable_benchmark $benchmark_steps $benchmark_warmup_steps }
PyTorch/Translation/Transformer/fairseq
fairseq
tokenizer
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. # #------------------------------------------------------------------------- # # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from collections import Counter import re import torch SPACE_NORMALIZER = re.compile("\s+") path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'prefixes/nonbreaking_prefix.en') prefixes ={} with open(path, 'r') as f: for line in f: line = line.strip() if line and not line[0] == '#': match = re.search(r'(.*)[\s]+(\#NUMERIC_ONLY\#)', line) if match: prefixes[match.group(1)] = 2 else: prefixes[line] = 1 def get_unicode_categories(): import sys from collections import defaultdict import unicodedata cats = defaultdict(list) for c in map(chr, range(sys.maxunicode + 1)): cats[unicodedata.category(c)].append(c) return cats NUMERICS = ''.join(get_unicode_categories()['No']) def tokenize_line(line): line = SPACE_NORMALIZER.sub(" ", line) line = line.strip() return line def tokenize_en(line): line = line.strip() line = ' ' + line + ' ' # remove ASCII junk line = re.sub(r'\s+', ' ', line) line = re.sub(r'[\x00-\x1F]', '', line) #fix whitespaces line = re.sub('\ +', ' ', line) line = re.sub('^ ', '', line) line = re.sub(' $', '', line) #separate other special characters line = re.sub(r'([^\s\.\'\`\,\-\w]|[_'+NUMERICS+'])', r' \g<1> ', line) line = re.sub(r'(\w)\-(?=\w)', r'\g<1> @-@ ', line) #multidots stay together line = re.sub(r'\.([\.]+)', r' DOTMULTI\g<1>', line) while re.search(r'DOTMULTI\.', line): line = re.sub(r'DOTMULTI\.([^\.])', r'DOTDOTMULTI \g<1>', line) line = re.sub(r'DOTMULTI\.', r'DOTDOTMULTI', line) # separate out "," except if within numbers (5,300) line = re.sub(r'([\D])[,]', r'\g<1> , ', line) line = re.sub(r'[,]([\D])', r' , \g<1>', line) # separate "," after a number if it's the end of sentence line = re.sub(r'(\d)[,]$', r'\g<1> ,', line) # split contractions right line = re.sub(r'([\W\d])[\']([\W\d])', '\g<1> \' \g<2>', line) line = re.sub(r'(\W)[\']([\w\D])', '\g<1> \' \g<2>', line) line = re.sub(r'([\w\D])[\']([\W\d])', '\g<1> \' \g<2>', line) line = re.sub(r'([\w\D])[\']([\w\D])', '\g<1> \'\g<2>', line) # special case for "1990's" line = re.sub(r'([\W\d])[\']([s])', '\g<1> \'\g<2>', line) # apply nonbreaking prefixes words = line.split() line = '' for i in range(len(words)): word = words[i] match = re.search(r'^(\S+)\.$', word) if match: pre = match.group(1) if i==len(words)-1: # split last words independently as they are unlikely to be non-breaking prefixes word = pre+' .' 
elif ((re.search(r'\.', pre) and re.search(r'[^\.\W\d]', pre)) or (pre in prefixes and prefixes[pre]==1) or re.search(r'^[a-z]', words[i+1]) or (pre in prefixes and prefixes[pre]==2 and re.search(r'^[0-9]+', words[i+1]))): pass else: word = pre+' .' word +=' ' line += word # clean up extraneous spaces line = re.sub(' +', ' ', line) line = re.sub('^ ', '', line) line = re.sub(' $', '', line) # .' at end of sentence is missed line = re.sub(r'\.\' ?$', ' . \' ', line) #restore multi-dots while re.search('DOTDOTMULTI', line): line = re.sub('DOTDOTMULTI', 'DOTMULTI.', line) line = re.sub('DOTMULTI', '.', line) # escape special characters line = re.sub(r'\&', r'&amp;', line) line = re.sub(r'\|', r'&#124;', line) line = re.sub(r'\<', r'&lt;', line) line = re.sub(r'\>', r'&gt;', line) line = re.sub(r'\'', r'&apos;', line) line = re.sub(r'\"', r'&quot;', line) line = re.sub(r'\[', r'&#91;', line) line = re.sub(r'\]', r'&#93;', line) #ensure final line breaks if line[-1] != '\n': line += '\n' return line def deescape(line): line = re.sub(r'&#124;', r'|', line) line = re.sub(r'&lt;', r'<', line) line = re.sub(r'&gt;', r'>', line) line = re.sub(r'&quot;', '\"', line) line = re.sub(r'&apos;', '\'', line) line = re.sub(r'&#91;', r'[', line) line = re.sub(r'&#93;', r']', line) line = re.sub(r'&amp;', r'&', line) return line class Tokenizer: @staticmethod def add_file_to_dictionary(filename, dict, tokenize): with open(filename, 'r') as f: for line in f: for word in tokenize(line).split(): dict.add_symbol(word) dict.add_symbol(dict.eos_word) @staticmethod def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False): nseq, ntok = 0, 0 replaced = Counter() def replaced_consumer(word, idx): if idx == dict.unk_index and word != dict.unk_word: replaced.update([word]) with open(filename, 'r') as f: for line in f: ids = Tokenizer.tokenize( line=line, dictionary=dict, tokenize=tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=append_eos, reverse_order=reverse_order, ) nseq += 1 consumer(ids) ntok += len(ids) return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': len(replaced)} @staticmethod def tokenize(line, dictionary, tokenize=tokenize_line, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False, bpe=None): line = tokenize(line) if bpe: line = bpe.process_line(line) words = line.split() if reverse_order: words = list(reversed(words)) nwords = len(words) ids = torch.IntTensor(nwords + 1 if append_eos else nwords) for i, word in enumerate(words): if add_if_not_exist: idx = dictionary.add_symbol(word) else: idx = dictionary.index(word) if consumer is not None: consumer(word, idx) ids[i] = idx if append_eos: ids[nwords] = dictionary.eos_index return ids @staticmethod def detokenize(line, lang): #don't try to detokenize XML/HTML tag lines if re.search(r'^<.+>$', line) or re.search(r'^\s*$', line): return line line = line.strip() line = ' '+line+' ' line = re.sub(r' @-@ ', '-', line) line = deescape(line) words = line.split() line = '' quote_count = {'\'':0, '\"':0} prepend_space = ' ' for i in range(len(words)): #perform rught shift of currency and some punctuation if re.search(r'^[\u20ac\x24\(\[\{]+$', words[i]): line += prepend_space + words[i] prepend_space = '' elif re.search(r'^[\,\.\?\!\:\;\\\%\}\]\)]+$', words[i]): if lang=='fr' and re.search(r'^[\?\!\:\;\\\%]$', words[i]): line += ' ' line += words[i] prepend_space = ' ' elif lang=='en' and i>0 and re.search(r'^[\'][\w\D]', words[i]) and 
re.search(r'\w$', words[i-1]): line += words[i] prepend_space = ' ' elif lang=='cs' and i>1 and re.search(r'^\d+$', words[i-2]) and re.search(r'^[.,]$', words[i-1]) and re.search(r'^\w+$', words[i]): line += words[i] prepend_space = ' ' elif (lang=='fr' or lang=='it') and i<len(words)-1 and re.search(r'[\w\D][\']$', words[i]) and re.search(r'^[\w\D]', words[i+1]): line += prepend_space + words[i] prepend_space = '' elif lang=='cs' and i<len(words)-3 and \ re.search(r'[\w\D]$', words[i]) and \ re.search(r'^-$', words[i+1]) and \ re.search(r'^li$|^mail.*', words[i+2], re.I): #line += ' '+words[i]+words[i+1] pass #TODO: skip one word elif re.search(r'^[\'\"\x60\u201c\u201d]+$', words[i]): normalized_quo = '\"' if re.search(r'^[\u201c\u201d]+$', words[i]) else words[i] quote_count[normalized_quo] = 0 if normalized_quo not in quote_count.keys() else quote_count[normalized_quo] if lang=='cs' and words[i] == '\u201c': quote_count[normalized_quo] = 0 if lang=='cs' and words[i] == '\u201d': quote_count[normalized_quo] = 1 if quote_count[normalized_quo] % 2 == 0: if lang=='en' and words[i]=='\'' and i > 0 and re.search(r'[s]$', words[i-1]): #single quote for posessives ending in s... "The Jones' house" #left shift line += words[i] prepend_space = ' ' else: #right shift line += prepend_space + words[i] prepend_space = '' quote_count[normalized_quo] += 1 else: #left shift line += words[i] prepend_space = ' ' quote_count[normalized_quo] += 1 elif lang=='fi' and re.search(r':$', words[i-1]) and re.search(r'^(N|n|A|a|Ä|ä|ssa|Ssa|ssä|Ssä|sta|stä|Sta|Stä|hun|Hun|hyn|Hyn|han|Han|hän|Hän|hön|Hön|un|Un|yn|Yn|an|An|än|Än|ön|Ön|seen|Seen|lla|Lla|llä|Llä|lta|Lta|ltä|Ltä|lle|Lle|ksi|Ksi|kse|Kse|tta|Tta|ine|Ine)(ni|si|mme|nne|nsa)?(ko|kö|han|hän|pa|pä|kaan|kään|kin)?$', words[i]): line += words[i].lower() prepend_space = ' ' else: line += prepend_space + words[i] prepend_space = ' ' #clean up spaces at head and tail of each line as well as any double-spacing line = re.sub(r' +', ' ', line) line = re.sub(r'\n ', '\n', line) line = re.sub(r' \n', '\n', line) line = re.sub(r'^ ', '', line) line = re.sub(r' $', '', line) #add trailing break line += '\n' if line[-1] != '\n' else '' return line
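A short sketch of how the pieces above compose (the Dictionary object is not defined in this file; it is assumed to expose add_symbol/index/eos_index the way Tokenizer.tokenize uses them):

line = "Mr. Smith's co-workers bought 5,300 books."
print(tokenize_en(line))
# Moses-style normalisation: intra-word hyphens become ' @-@ ', contractions and
# punctuation are split off and XML-escaped, and non-breaking prefixes keep their period.

# Mapping to ids against a fairseq-style dictionary (Dictionary not shown here):
# ids = Tokenizer.tokenize(line, dictionary=d, tokenize=tokenize_en, add_if_not_exist=True)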
TensorFlow2/Classification/ConvNets/efficientnet_v1/B0/evaluation
evaluation
evaluation_AMP_A100-80G
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python3 main.py --cfg config/efficientnet_v1/b0_cfg.py \ --mode eval \ --use_amp \ --use_xla \ --model_dir ./output \ --data_dir /data \ --eval_batch_size 1024
TensorFlow/Classification/ConvNets/utils
utils
__init__
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from utils import hooks from utils import var_storage from utils import cmdline_helper from utils import data_utils from utils import image_processing from utils import learning_rate from utils import dali_utils
TensorFlow2/LanguageModeling/BERT
BERT
run_pretraining
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Run masked LM/next sentence masked_lm pre-training for BERT in tf2.0.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import app from absl import flags from absl import logging import tensorflow as tf import horovod.tensorflow as hvd import os # Import BERT model libraries. from official.nlp import bert_models import common_flags import input_pipeline import model_saving_utils from official.modeling import model_training_utils from official.nlp import bert_modeling as modeling import optimization import gpu_affinity import dllogger_class from official.utils.misc import distribution_utils from official.utils.misc import keras_utils from official.utils.misc import tpu_lib flags.DEFINE_string('input_files', None, 'File path to retrieve training data for pre-training.') # Model training specific flags. flags.DEFINE_integer( 'max_seq_length', 128, 'The maximum total input sequence length after WordPiece tokenization. ' 'Sequences longer than this will be truncated, and sequences shorter ' 'than this will be padded.') flags.DEFINE_integer('max_predictions_per_seq', 20, 'Maximum predictions per sequence_output.') flags.DEFINE_integer('train_batch_size', 32, 'Total batch size for training.') flags.DEFINE_integer('num_steps_per_epoch', 1000, 'Total number of training steps to run per epoch.') flags.DEFINE_float('warmup_steps', 10000, 'Warmup steps for Adam weight decay optimizer.') common_flags.define_common_bert_flags() FLAGS = flags.FLAGS def get_pretrain_dataset_fn(input_file_pattern, seq_length, max_predictions_per_seq, global_batch_size): """Returns input dataset from input file string.""" def _dataset_fn(ctx=None): """Returns tf.data.Dataset for distributed BERT pretraining.""" input_patterns = input_file_pattern.split(',') batch_size = ctx.get_per_replica_batch_size( global_batch_size) if ctx else global_batch_size train_dataset = input_pipeline.create_pretrain_dataset( input_patterns, seq_length, max_predictions_per_seq, batch_size, is_training=True, input_pipeline_context=ctx, use_horovod=FLAGS.use_horovod) return train_dataset return _dataset_fn def get_loss_fn(loss_factor=1.0): """Returns loss function for BERT pretraining.""" def _bert_pretrain_loss_fn(unused_labels, losses, **unused_args): return tf.keras.backend.mean(losses) * loss_factor return _bert_pretrain_loss_fn def run_customized_training(strategy, bert_config, max_seq_length, max_predictions_per_seq, model_dir, steps_per_epoch, steps_per_loop, epochs, initial_lr, warmup_steps, input_files, train_batch_size): """Run BERT pretrain model training using low-level API.""" train_input_fn = get_pretrain_dataset_fn(input_files, max_seq_length, max_predictions_per_seq, train_batch_size) def _get_pretrain_model(): """Gets a pretraining model.""" pretrain_model, core_model = 
bert_models.pretrain_model( bert_config, max_seq_length, max_predictions_per_seq, float_type=tf.float16 if FLAGS.use_fp16 else tf.float32) pretrain_model.optimizer = optimization.create_optimizer( initial_lr, steps_per_epoch * epochs, warmup_steps, FLAGS.optimizer_type) if FLAGS.use_fp16: pretrain_model.optimizer = tf.keras.mixed_precision.LossScaleOptimizer(pretrain_model.optimizer, dynamic=True) return pretrain_model, core_model dllogging = dllogger_class.dllogger_class(FLAGS.dllog_path) params = {'dllogging' : dllogging, 'FLAGS' : FLAGS} logging.info("init_lr = %f", initial_lr) trained_model = model_training_utils.run_customized_training_loop( strategy=strategy, model_fn=_get_pretrain_model, loss_fn=get_loss_fn( loss_factor=1.0 / strategy.num_replicas_in_sync if FLAGS.scale_loss and strategy else 1.0), model_dir=model_dir, train_input_fn=train_input_fn, steps_per_epoch=steps_per_epoch, num_accumulative_step=FLAGS.num_accumulation_steps, steps_per_loop=steps_per_loop, epochs=epochs, sub_model_export_name='pretrained/bert_model', init_checkpoint=FLAGS.init_checkpoint, hvd=hvd if FLAGS.use_horovod else None, params=params) return trained_model def run_bert_pretrain(strategy): """Runs BERT pre-training.""" bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) # Padding for divisibility by 8 # if bert_config.vocab_size % 8 != 0: # bert_config.vocab_size += 8 - bert_config.vocab_size % 8 if strategy: logging.info('Training using customized training loop TF 2.0 with distrubuted' 'strategy.') keras_utils.set_config_v2(FLAGS.enable_xla) # Runs customized training loop. return run_customized_training( strategy, bert_config, FLAGS.max_seq_length, FLAGS.max_predictions_per_seq, FLAGS.model_dir, FLAGS.num_steps_per_epoch, FLAGS.steps_per_loop, FLAGS.num_train_epochs, FLAGS.learning_rate * hvd.size() if FLAGS.use_horovod else FLAGS.learning_rate, FLAGS.warmup_steps, FLAGS.input_files, FLAGS.train_batch_size) def main(_): # Users should always run this script under TF 2.x assert tf.version.VERSION.startswith('2.') if not FLAGS.model_dir: FLAGS.model_dir = '/tmp/bert20/' gpus = tf.config.experimental.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) strategy = distribution_utils.get_distribution_strategy( distribution_strategy=FLAGS.distribution_strategy, num_gpus=FLAGS.num_gpus, tpu_address=FLAGS.tpu) if strategy: print('***** Number of cores used : ', strategy.num_replicas_in_sync) if FLAGS.use_horovod: if strategy: raise ValueError('Should not run horovod with distribution strategy') hvd.init() if gpus: tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU') gpu_affinity.set_affinity(hvd.local_rank()) if FLAGS.use_fp16: policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") tf.keras.mixed_precision.experimental.set_policy(policy) run_bert_pretrain(strategy) if __name__ == '__main__': app.run(main)
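The per-replica batching done inside _dataset_fn above is just the global batch size divided across replicas. A minimal sketch with hypothetical numbers (not taken from the file) of what ctx.get_per_replica_batch_size amounts to:

# Hypothetical values; roughly what ctx.get_per_replica_batch_size(global_batch_size) returns.
global_batch_size = 32   # FLAGS.train_batch_size
num_replicas = 8         # replicas in sync under the distribution strategy
per_replica_batch_size = global_batch_size // num_replicas
print(per_replica_batch_size)  # 4 examples per replica per training step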
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/utils
utils
model_serialization
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from collections import OrderedDict import logging import torch def align_and_update_state_dicts(model_state_dict, loaded_state_dict): """ Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. """ current_keys = sorted(list(model_state_dict.keys())) loaded_keys = sorted(list(loaded_state_dict.keys())) # get a matrix of string matches, where each (i, j) entry correspond to the size of the # loaded_key string, if it matches match_matrix = [ len(j) if i.endswith(j) else 0 for i in current_keys for j in loaded_keys ] match_matrix = torch.as_tensor(match_matrix).view( len(current_keys), len(loaded_keys) ) max_match_size, idxs = match_matrix.max(1) # remove indices that correspond to no-match idxs[max_match_size == 0] = -1 # used for logging max_size = max([len(key) for key in current_keys]) if current_keys else 1 max_size_loaded = max([len(key) for key in loaded_keys]) if loaded_keys else 1 log_str_template = "{: <{}} loaded from {: <{}} of shape {}" logger = logging.getLogger(__name__) for idx_new, idx_old in enumerate(idxs.tolist()): if idx_old == -1: continue key = current_keys[idx_new] key_old = loaded_keys[idx_old] model_state_dict[key] = loaded_state_dict[key_old] logger.info( log_str_template.format( key, max_size, key_old, max_size_loaded, tuple(loaded_state_dict[key_old].shape), ) ) def strip_prefix_if_present(state_dict, prefix): keys = sorted(state_dict.keys()) if not all(key.startswith(prefix) for key in keys): return state_dict stripped_state_dict = OrderedDict() for key, value in state_dict.items(): stripped_state_dict[key.replace(prefix, "")] = value return stripped_state_dict def load_state_dict(model, loaded_state_dict): model_state_dict = model.state_dict() # if the state_dict comes from a model that was wrapped in a # DataParallel or DistributedDataParallel during serialization, # remove the "module" prefix before performing the matching loaded_state_dict = strip_prefix_if_present(loaded_state_dict, prefix="module.") align_and_update_state_dicts(model_state_dict, loaded_state_dict) # use strict loading model.load_state_dict(model_state_dict)
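A minimal, self-contained sketch of the suffix-matching rule described in the align_and_update_state_dicts docstring, using hypothetical key names rather than a real checkpoint:

# Pick, for each model key, the loaded key that is its longest suffix.
current_keys = ["backbone.body.res2.conv1.weight", "backbone.body.conv1.weight"]
loaded_keys = ["res2.conv1.weight", "conv1.weight"]
for cur in current_keys:
    matches = [k for k in loaded_keys if cur.endswith(k)]
    best = max(matches, key=len) if matches else None
    print(cur, "<-", best)
# backbone.body.res2.conv1.weight <- res2.conv1.weight
# backbone.body.conv1.weight <- conv1.weight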
PyTorch/SpeechRecognition/Jasper/configs
configs
jasper10x5dr_speedp-online_speca
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. name: "Jasper" labels: [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'"] input_val: audio_dataset: &val_dataset sample_rate: &sample_rate 16000 trim_silence: true normalize_transcripts: true filterbank_features: &val_features normalize: per_feature sample_rate: *sample_rate window_size: 0.02 window_stride: 0.01 window: hann n_filt: &n_filt 64 n_fft: 512 frame_splicing: &frame_splicing 1 dither: 0.00001 pad_align: 16 # For training we keep samples < 16.7s and apply augmentation input_train: audio_dataset: <<: *val_dataset max_duration: 16.7 ignore_offline_speed_perturbation: true speed_perturbation: discrete: false min_rate: 0.85 max_rate: 1.15 filterbank_features: <<: *val_features max_duration: 16.7 spec_augment: freq_masks: 2 max_freq: 20 time_masks: 2 max_time: 75 jasper: encoder: init: xavier_uniform in_feats: *n_filt frame_splicing: *frame_splicing activation: relu use_conv_masks: true blocks: - &Conv1 filters: 256 repeat: 1 kernel_size: [11] stride: [2] dilation: [1] dropout: 0.2 residual: false - &B1 filters: 256 repeat: 5 kernel_size: [11] stride: [1] dilation: [1] dropout: 0.2 residual: true residual_dense: true - *B1 - &B2 filters: 384 repeat: 5 kernel_size: [13] stride: [1] dilation: [1] dropout: 0.2 residual: true residual_dense: true - *B2 - &B3 filters: 512 repeat: 5 kernel_size: [17] stride: [1] dilation: [1] dropout: 0.2 residual: true residual_dense: true - *B3 - &B4 filters: 640 repeat: 5 kernel_size: [21] stride: [1] dilation: [1] dropout: 0.3 residual: true residual_dense: true - *B4 - &B5 filters: 768 repeat: 5 kernel_size: [25] stride: [1] dilation: [1] dropout: 0.3 residual: true residual_dense: true - *B5 - &Conv2 filters: 896 repeat: 1 kernel_size: [29] stride: [1] dilation: [2] dropout: 0.4 residual: false - &Conv3 filters: &enc_feats 1024 repeat: 1 kernel_size: [1] stride: [1] dilation: [1] dropout: 0.4 residual: false decoder: in_feats: *enc_feats init: xavier_uniform
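The config above leans on YAML anchors and merge keys (&val_dataset, <<:) so the training dataset and features reuse the validation settings and only override a few fields. A small Python sketch, with a made-up two-key document, of how such a merge resolves when loaded with PyYAML:

import yaml

doc = """
base: &base {sample_rate: 16000, trim_silence: true}
train:
  <<: *base
  max_duration: 16.7
"""
print(sorted(yaml.safe_load(doc)["train"].items()))
# [('max_duration', 16.7), ('sample_rate', 16000), ('trim_silence', True)]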
TensorFlow/Recommendation/WideAndDeep
WideAndDeep
requirements-no-deps
tensorflow-transform==0.24.1
apache-beam==2.14
tensorflow-metadata==0.14.0
pydot
dill
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
ssd_mobilenet_v1_0.75_depth_quantized_300x300_pets_sync
# SSD with Mobilenet v1 0.75 depth multiplied feature extractor, focal loss and # quantized training. # Trained on IIIT-Oxford pets, initialized from COCO detection checkpoint # This config is TPU compatible model { ssd { inplace_batchnorm_update: true freeze_batchnorm: false num_classes: 37 box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true use_matmul_gather: true } } similarity_calculator { iou_similarity { } } encode_background_as_zeros: true anchor_generator { ssd_anchor_generator { num_layers: 6 min_scale: 0.2 max_scale: 0.95 aspect_ratios: 1.0 aspect_ratios: 2.0 aspect_ratios: 0.5 aspect_ratios: 3.0 aspect_ratios: 0.3333 } } image_resizer { fixed_shape_resizer { height: 300 width: 300 } } box_predictor { convolutional_box_predictor { min_depth: 0 max_depth: 0 num_layers_before_predictor: 0 use_dropout: false dropout_keep_probability: 0.8 kernel_size: 1 box_code_size: 4 apply_sigmoid_to_scores: false class_prediction_bias_init: -4.6 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { random_normal_initializer { stddev: 0.01 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.9, epsilon: 0.001, } } } } feature_extractor { type: 'ssd_mobilenet_v1' min_depth: 16 depth_multiplier: 0.75 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { scale: true, center: true, decay: 0.9, epsilon: 0.001, } } override_base_feature_extractor_hyperparams: true } loss { classification_loss { weighted_sigmoid_focal { alpha: 0.75, gamma: 2.0 } } localization_loss { weighted_smooth_l1 { delta: 1.0 } } classification_weight: 1.0 localization_weight: 1.0 } normalize_loss_by_num_matches: true normalize_loc_loss_by_codesize: true post_processing { batch_non_max_suppression { score_threshold: 1e-8 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 100 } score_converter: SIGMOID } } } train_config: { fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" fine_tune_checkpoint_type: "detection" load_all_detection_checkpoint_vars: true batch_size: 128 sync_replicas: true startup_delay_steps: 0 replicas_to_aggregate: 8 num_steps: 2000 data_augmentation_options { random_horizontal_flip { } } data_augmentation_options { ssd_random_crop { } } optimizer { momentum_optimizer: { learning_rate: { cosine_decay_learning_rate { learning_rate_base: 0.2 total_steps: 2000 warmup_steps: 0 } } momentum_optimizer_value: 0.9 } use_moving_average: false } max_number_of_boxes: 100 unpad_groundtruth_tensors: false } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/pet_faces_train.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt" } eval_config: { metrics_set: "coco_detection_metrics" use_moving_averages: false num_examples: 1100 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/pet_faces_val.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt" shuffle: false num_readers: 1 } graph_rewriter { quantization { delay: 1800 activation_bits: 8 weight_bits: 8 } }
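For reference, the anchor scales implied by min_scale: 0.2, max_scale: 0.95 and num_layers: 6 above are, in the usual SSD formulation, linearly spaced between the two bounds. A hedged sketch of that arithmetic (the exact anchor generator may apply layer-specific tweaks on top of this):

num_layers, min_scale, max_scale = 6, 0.2, 0.95
scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)
          for i in range(num_layers)]
print([round(s, 2) for s in scales])  # [0.2, 0.35, 0.5, 0.65, 0.8, 0.95]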
CUDA-Optimized/FastSpeech/fastspeech/trt
trt
trt_inferencer
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import abc import ctypes import glob import os import pathlib import sys from collections import OrderedDict import numpy as np import tensorrt as trt import torch import torch.nn as nn import torch.nn.functional as F from tensorrt import Dims, ElementWiseOperation, MatrixOperation, Weights from fastspeech.text_norm.symbols import symbols from fastspeech.trt import TRT_LOGGER from fastspeech.utils.logging import tprint from fastspeech.utils.pytorch import remove_module_in_state_dict, to_cpu_numpy class TRTInferencer(object): def __init__(self, model_name, model, data_loader, ckpt_path=None, ckpt_file=None, trt_max_ws_size=1, trt_file_path=None, trt_force_build=False, use_fp16=False): self.model_name = model_name self.model = model self.data_loader = data_loader self.ckpt_path = ckpt_path self.ckpt_file = ckpt_file self.trt_max_ws_size = trt_max_ws_size self.trt_file_path = trt_file_path self.trt_force_build = trt_force_build self.use_fp16 = use_fp16 self.batch_size = data_loader.batch_size self.plugins = dict() self.data_loader_iter = iter(self.data_loader) # checkpoint path if self.ckpt_path: self.ckpt_path = os.path.join(self.ckpt_path, self.model_name) pathlib.Path(self.ckpt_path).mkdir(parents=True, exist_ok=True) # load checkpoint self.load(ckpt_file) self.engine = self.build_engine() def __enter__(self): self.context = self.engine.create_execution_context() def __exit__(self, exception_type, exception_value, traceback): self.context.__del__() self.engine.__del__() def load(self, ckpt_file): # load latest checkpoint file if not defined. if not ckpt_file: files_exist = glob.glob(os.path.join(self.ckpt_path, '*')) if files_exist: ckpt_file = max(files_exist, key=os.path.getctime) if ckpt_file: state_dict = torch.load(ckpt_file, map_location='cpu') self.step = state_dict['step'] self.model.load_state_dict( remove_module_in_state_dict(state_dict['model'])) tprint('[Load] Checkpoint \'{}\'. Step={}'.format( ckpt_file, self.step)) else: tprint('No checkpoints in {}. 
Load skipped.'.format(self.ckpt_path)) def load_plugin(self, path): ctypes.cdll.LoadLibrary(path) def get_plugin_creator(self, plugin_name): trt.init_libnvinfer_plugins(TRT_LOGGER, '') plugin_creator_list = trt.get_plugin_registry().plugin_creator_list plugin_creator = None for c in plugin_creator_list: if c.name == plugin_name: plugin_creator = c return plugin_creator def get_plugin(self, name): return self.plugins[name] @abc.abstractmethod def create_plugins(self): return NotImplemented @abc.abstractmethod def build_engine(self): return NotImplemented @abc.abstractmethod def infer(self): return NotImplemented
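When no checkpoint file is given, load() falls back to the newest file in the checkpoint directory. A standalone sketch of that rule with a hypothetical directory path:

import glob
import os

ckpt_dir = "/workspace/checkpoints/my_model"   # hypothetical stand-in for self.ckpt_path
candidates = glob.glob(os.path.join(ckpt_dir, "*"))
latest = max(candidates, key=os.path.getctime) if candidates else None
print(latest)  # newest file by creation time, or None if the directory is empty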
TensorFlow/Detection/SSD/models/research/slim/datasets
datasets
download_imagenet
#!/bin/bash # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Script to download ImageNet Challenge 2012 training and validation data set. # # Downloads and decompresses raw images and bounding boxes. # # **IMPORTANT** # To download the raw images, the user must create an account with image-net.org # and generate a username and access_key. The latter two are required for # downloading the raw images. # # usage: # ./download_imagenet.sh [dirname] set -e if [ "x$IMAGENET_ACCESS_KEY" == x -o "x$IMAGENET_USERNAME" == x ]; then cat <<END In order to download the imagenet data, you have to create an account with image-net.org. This will get you a username and an access key. You can set the IMAGENET_USERNAME and IMAGENET_ACCESS_KEY environment variables, or you can enter the credentials here. END read -p "Username: " IMAGENET_USERNAME read -p "Access key: " IMAGENET_ACCESS_KEY fi OUTDIR="${1:-./imagenet-data}" SYNSETS_FILE="${2:-./synsets.txt}" echo "Saving downloaded files to $OUTDIR" mkdir -p "${OUTDIR}" CURRENT_DIR=$(pwd) BBOX_DIR="${OUTDIR}bounding_boxes" mkdir -p "${BBOX_DIR}" cd "${OUTDIR}" # Download and process all of the ImageNet bounding boxes. BASE_URL="http://www.image-net.org/challenges/LSVRC/2012/nnoupb" # See here for details: http://www.image-net.org/download-bboxes BOUNDING_BOX_ANNOTATIONS="${BASE_URL}/ILSVRC2012_bbox_train_v2.tar.gz" BBOX_TAR_BALL="${BBOX_DIR}/annotations.tar.gz" echo "Downloading bounding box annotations." wget "${BOUNDING_BOX_ANNOTATIONS}" -O "${BBOX_TAR_BALL}" echo "Uncompressing bounding box annotations ..." tar xzf "${BBOX_TAR_BALL}" -C "${BBOX_DIR}" LABELS_ANNOTATED="${BBOX_DIR}/*" NUM_XML=$(ls -1 ${LABELS_ANNOTATED} | wc -l) echo "Identified ${NUM_XML} bounding box annotations." # Download and uncompress all images from the ImageNet 2012 validation dataset. VALIDATION_TARBALL="ILSVRC2012_img_val.tar" OUTPUT_PATH="${OUTDIR}validation/" mkdir -p "${OUTPUT_PATH}" cd "${OUTDIR}/.." echo "Downloading ${VALIDATION_TARBALL} to ${OUTPUT_PATH}." wget -nd -c "${BASE_URL}/${VALIDATION_TARBALL}" tar xf "${VALIDATION_TARBALL}" -C "${OUTPUT_PATH}" # Download all images from the ImageNet 2012 train dataset. TRAIN_TARBALL="ILSVRC2012_img_train.tar" OUTPUT_PATH="${OUTDIR}train/" mkdir -p "${OUTPUT_PATH}" cd "${OUTDIR}/.." echo "Downloading ${TRAIN_TARBALL} to ${OUTPUT_PATH}." wget -nd -c "${BASE_URL}/${TRAIN_TARBALL}" # Un-compress the individual tar-files within the train tar-file. echo "Uncompressing individual train tar-balls in the training data." while read SYNSET; do echo "Processing: ${SYNSET}" # Create a directory and delete anything there. mkdir -p "${OUTPUT_PATH}/${SYNSET}" rm -rf "${OUTPUT_PATH}/${SYNSET}/*" # Uncompress into the directory. tar xf "${TRAIN_TARBALL}" "${SYNSET}.tar" tar xf "${SYNSET}.tar" -C "${OUTPUT_PATH}/${SYNSET}/" rm -f "${SYNSET}.tar" echo "Finished processing: ${SYNSET}" done < "${SYNSETS_FILE}"
PyTorch/Classification/GPUNet
GPUNet
.gitignore
.ipynb_checkpoints
__pycache__
TensorFlow/Detection/SSD/models/research/object_detection/core
core
losses
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classification and regression loss functions for object detection. Localization losses: * WeightedL2LocalizationLoss * WeightedSmoothL1LocalizationLoss * WeightedIOULocalizationLoss Classification losses: * WeightedSigmoidClassificationLoss * WeightedSoftmaxClassificationLoss * WeightedSoftmaxClassificationAgainstLogitsLoss * BootstrappedSigmoidClassificationLoss """ from abc import ABCMeta from abc import abstractmethod import tensorflow as tf from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.utils import ops slim = tf.contrib.slim class Loss(object): """Abstract base class for loss functions.""" __metaclass__ = ABCMeta def __call__(self, prediction_tensor, target_tensor, ignore_nan_targets=False, losses_mask=None, scope=None, **params): """Call the loss function. Args: prediction_tensor: an N-d tensor of shape [batch, anchors, ...] representing predicted quantities. target_tensor: an N-d tensor of shape [batch, anchors, ...] representing regression or classification targets. ignore_nan_targets: whether to ignore nan targets in the loss computation. E.g. can be used if the target tensor is missing groundtruth data that shouldn't be factored into the loss. losses_mask: A [batch] boolean tensor that indicates whether losses should be applied to individual images in the batch. For elements that are True, corresponding prediction, target, and weight tensors will be removed prior to loss computation. If None, no filtering will take place prior to loss computation. scope: Op scope name. Defaults to 'Loss' if None. **params: Additional keyword arguments for specific implementations of the Loss. Returns: loss: a tensor representing the value of the loss function. """ with tf.name_scope(scope, 'Loss', [prediction_tensor, target_tensor, params]) as scope: if ignore_nan_targets: target_tensor = tf.where(tf.is_nan(target_tensor), prediction_tensor, target_tensor) if losses_mask is not None: tensor_multiplier = self._get_loss_multiplier_for_tensor( prediction_tensor, losses_mask) prediction_tensor *= tensor_multiplier target_tensor *= tensor_multiplier if 'weights' in params: params['weights'] = tf.convert_to_tensor(params['weights']) weights_multiplier = self._get_loss_multiplier_for_tensor( params['weights'], losses_mask) params['weights'] *= weights_multiplier return self._compute_loss(prediction_tensor, target_tensor, **params) def _get_loss_multiplier_for_tensor(self, tensor, losses_mask): loss_multiplier_shape = tf.stack([-1] + [1] * (len(tensor.shape) - 1)) return tf.cast(tf.reshape(losses_mask, loss_multiplier_shape), tf.float32) @abstractmethod def _compute_loss(self, prediction_tensor, target_tensor, **params): """Method to be overridden by implementations. 
Args: prediction_tensor: a tensor representing predicted quantities target_tensor: a tensor representing regression or classification targets **params: Additional keyword arguments for specific implementations of the Loss. Returns: loss: an N-d tensor of shape [batch, anchors, ...] containing the loss per anchor """ pass class WeightedL2LocalizationLoss(Loss): """L2 localization loss function with anchorwise output support. Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2 """ def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the (encoded) predicted locations of objects. target_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the regression targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. """ weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims( weights, 2) square_diff = 0.5 * tf.square(weighted_diff) return tf.reduce_sum(square_diff, 2) class WeightedSmoothL1LocalizationLoss(Loss): """Smooth L1 localization loss function aka Huber Loss.. The smooth L1_loss is defined elementwise as .5 x^2 if |x| <= delta and delta * (|x|- 0.5*delta) otherwise, where x is the difference between predictions and target. See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015) """ def __init__(self, delta=1.0): """Constructor. Args: delta: delta for smooth L1 loss. """ self._delta = delta def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the (encoded) predicted locations of objects. target_tensor: A float tensor of shape [batch_size, num_anchors, code_size] representing the regression targets weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. """ return tf.reduce_sum(tf.losses.huber_loss( target_tensor, prediction_tensor, delta=self._delta, weights=tf.expand_dims(weights, axis=2), loss_collection=None, reduction=tf.losses.Reduction.NONE ), axis=2) class WeightedIOULocalizationLoss(Loss): """IOU localization loss function. Sums the IOU for corresponding pairs of predicted/groundtruth boxes and for each pair assign a loss of 1 - IOU. We then compute a weighted sum over all pairs which is returned as the total loss. """ def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, 4] representing the decoded predicted boxes target_tensor: A float tensor of shape [batch_size, num_anchors, 4] representing the decoded target boxes weights: a float tensor of shape [batch_size, num_anchors] Returns: loss: a float tensor of shape [batch_size, num_anchors] tensor representing the value of the loss function. 
""" predicted_boxes = box_list.BoxList(tf.reshape(prediction_tensor, [-1, 4])) target_boxes = box_list.BoxList(tf.reshape(target_tensor, [-1, 4])) per_anchor_iou_loss = 1.0 - box_list_ops.matched_iou(predicted_boxes, target_boxes) return tf.reshape(weights, [-1]) * per_anchor_iou_loss class WeightedSigmoidClassificationLoss(Loss): """Sigmoid cross entropy classification loss function.""" def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. class_indices: (Optional) A 1-D integer tensor of class indices. If provided, computes loss only for the specified class indices. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ if class_indices is not None: weights *= tf.reshape( ops.indices_to_dense_vector(class_indices, tf.shape(prediction_tensor)[2]), [1, 1, -1]) per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( labels=target_tensor, logits=prediction_tensor)) return per_entry_cross_ent * weights class SigmoidFocalClassificationLoss(Loss): """Sigmoid focal cross entropy loss. Focal loss down-weights well classified examples and focusses on the hard examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition. """ def __init__(self, gamma=2.0, alpha=0.25): """Constructor. Args: gamma: exponent of the modulating factor (1 - p_t) ^ gamma. alpha: optional alpha weighting factor to balance positives vs negatives. """ self._alpha = alpha self._gamma = gamma def _compute_loss(self, prediction_tensor, target_tensor, weights, class_indices=None): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. class_indices: (Optional) A 1-D integer tensor of class indices. If provided, computes loss only for the specified class indices. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. 
""" if class_indices is not None: weights *= tf.reshape( ops.indices_to_dense_vector(class_indices, tf.shape(prediction_tensor)[2]), [1, 1, -1]) per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( labels=target_tensor, logits=prediction_tensor)) prediction_probabilities = tf.sigmoid(prediction_tensor) p_t = ((target_tensor * prediction_probabilities) + ((1 - target_tensor) * (1 - prediction_probabilities))) modulating_factor = 1.0 if self._gamma: modulating_factor = tf.pow(1.0 - p_t, self._gamma) alpha_weight_factor = 1.0 if self._alpha is not None: alpha_weight_factor = (target_tensor * self._alpha + (1 - target_tensor) * (1 - self._alpha)) focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor * per_entry_cross_ent) return focal_cross_entropy_loss * weights class WeightedSoftmaxClassificationLoss(Loss): """Softmax loss function.""" def __init__(self, logit_scale=1.0): """Constructor. Args: logit_scale: When this value is high, the prediction is "diffused" and when this value is low, the prediction is made peakier. (default 1.0) """ self._logit_scale = logit_scale def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. Returns: loss: a float tensor of shape [batch_size, num_anchors] representing the value of the loss function. """ weights = tf.reduce_mean(weights, axis=2) num_classes = prediction_tensor.get_shape().as_list()[-1] prediction_tensor = tf.divide( prediction_tensor, self._logit_scale, name='scale_logit') per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( labels=tf.reshape(target_tensor, [-1, num_classes]), logits=tf.reshape(prediction_tensor, [-1, num_classes]))) return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights class WeightedSoftmaxClassificationAgainstLogitsLoss(Loss): """Softmax loss function against logits. Targets are expected to be provided in logits space instead of "one hot" or "probability distribution" space. """ def __init__(self, logit_scale=1.0): """Constructor. Args: logit_scale: When this value is high, the target is "diffused" and when this value is low, the target is made peakier. (default 1.0) """ self._logit_scale = logit_scale def _scale_and_softmax_logits(self, logits): """Scale logits then apply softmax.""" scaled_logits = tf.divide(logits, self._logit_scale, name='scale_logits') return tf.nn.softmax(scaled_logits, name='convert_scores') def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing logit classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. Returns: loss: a float tensor of shape [batch_size, num_anchors] representing the value of the loss function. 
""" weights = tf.reduce_mean(weights, axis=2) num_classes = prediction_tensor.get_shape().as_list()[-1] target_tensor = self._scale_and_softmax_logits(target_tensor) prediction_tensor = tf.divide(prediction_tensor, self._logit_scale, name='scale_logits') per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits( labels=tf.reshape(target_tensor, [-1, num_classes]), logits=tf.reshape(prediction_tensor, [-1, num_classes]))) return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights class BootstrappedSigmoidClassificationLoss(Loss): """Bootstrapped sigmoid cross entropy classification loss function. This loss uses a convex combination of training labels and the current model's predictions as training targets in the classification loss. The idea is that as the model improves over time, its predictions can be trusted more and we can use these predictions to mitigate the damage of noisy/incorrect labels, because incorrect labels are likely to be eventually highly inconsistent with other stimuli predicted to have the same label by the model. In "soft" bootstrapping, we use all predicted class probabilities, whereas in "hard" bootstrapping, we use the single class favored by the model. See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by Reed et al. (ICLR 2015). """ def __init__(self, alpha, bootstrap_type='soft'): """Constructor. Args: alpha: a float32 scalar tensor between 0 and 1 representing interpolation weight bootstrap_type: set to either 'hard' or 'soft' (default) Raises: ValueError: if bootstrap_type is not either 'hard' or 'soft' """ if bootstrap_type != 'hard' and bootstrap_type != 'soft': raise ValueError('Unrecognized bootstrap_type: must be one of ' '\'hard\' or \'soft.\'') self._alpha = alpha self._bootstrap_type = bootstrap_type def _compute_loss(self, prediction_tensor, target_tensor, weights): """Compute loss function. Args: prediction_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing the predicted logits for each class target_tensor: A float tensor of shape [batch_size, num_anchors, num_classes] representing one-hot encoded classification targets weights: a float tensor of shape, either [batch_size, num_anchors, num_classes] or [batch_size, num_anchors, 1]. If the shape is [batch_size, num_anchors, 1], all the classses are equally weighted. Returns: loss: a float tensor of shape [batch_size, num_anchors, num_classes] representing the value of the loss function. """ if self._bootstrap_type == 'soft': bootstrap_target_tensor = self._alpha * target_tensor + ( 1.0 - self._alpha) * tf.sigmoid(prediction_tensor) else: bootstrap_target_tensor = self._alpha * target_tensor + ( 1.0 - self._alpha) * tf.cast( tf.sigmoid(prediction_tensor) > 0.5, tf.float32) per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits( labels=bootstrap_target_tensor, logits=prediction_tensor)) return per_entry_cross_ent * weights class HardExampleMiner(object): """Hard example mining for regions in a list of images. Implements hard example mining to select a subset of regions to be back-propagated. For each image, selects the regions with highest losses, subject to the condition that a newly selected region cannot have an IOU > iou_threshold with any of the previously selected regions. This can be achieved by re-using a greedy non-maximum suppression algorithm. A constraint on the number of negatives mined per positive region can also be enforced. 
Reference papers: "Training Region-based Object Detectors with Online Hard Example Mining" (CVPR 2016) by Srivastava et al., and "SSD: Single Shot MultiBox Detector" (ECCV 2016) by Liu et al. """ def __init__(self, num_hard_examples=64, iou_threshold=0.7, loss_type='both', cls_loss_weight=0.05, loc_loss_weight=0.06, max_negatives_per_positive=None, min_negatives_per_image=0): """Constructor. The hard example mining implemented by this class can replicate the behavior in the two aforementioned papers (Srivastava et al., and Liu et al). To replicate the A2 paper (Srivastava et al), num_hard_examples is set to a fixed parameter (64 by default) and iou_threshold is set to .7 for running non-max-suppression the predicted boxes prior to hard mining. In order to replicate the SSD paper (Liu et al), num_hard_examples should be set to None, max_negatives_per_positive should be 3 and iou_threshold should be 1.0 (in order to effectively turn off NMS). Args: num_hard_examples: maximum number of hard examples to be selected per image (prior to enforcing max negative to positive ratio constraint). If set to None, all examples obtained after NMS are considered. iou_threshold: minimum intersection over union for an example to be discarded during NMS. loss_type: use only classification losses ('cls', default), localization losses ('loc') or both losses ('both'). In the last case, cls_loss_weight and loc_loss_weight are used to compute weighted sum of the two losses. cls_loss_weight: weight for classification loss. loc_loss_weight: weight for location loss. max_negatives_per_positive: maximum number of negatives to retain for each positive anchor. By default, num_negatives_per_positive is None, which means that we do not enforce a prespecified negative:positive ratio. Note also that num_negatives_per_positives can be a float (and will be converted to be a float even if it is passed in otherwise). min_negatives_per_image: minimum number of negative anchors to sample for a given image. Setting this to a positive number allows sampling negatives in an image without any positive anchors and thus not biased towards at least one detection per image. """ self._num_hard_examples = num_hard_examples self._iou_threshold = iou_threshold self._loss_type = loss_type self._cls_loss_weight = cls_loss_weight self._loc_loss_weight = loc_loss_weight self._max_negatives_per_positive = max_negatives_per_positive self._min_negatives_per_image = min_negatives_per_image if self._max_negatives_per_positive is not None: self._max_negatives_per_positive = float(self._max_negatives_per_positive) self._num_positives_list = None self._num_negatives_list = None def __call__(self, location_losses, cls_losses, decoded_boxlist_list, match_list=None): """Computes localization and classification losses after hard mining. Args: location_losses: a float tensor of shape [num_images, num_anchors] representing anchorwise localization losses. cls_losses: a float tensor of shape [num_images, num_anchors] representing anchorwise classification losses. decoded_boxlist_list: a list of decoded BoxList representing location predictions for each image. match_list: an optional list of matcher.Match objects encoding the match between anchors and groundtruth boxes for each image of the batch, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. Match objects in match_list are used to reference which anchors are positive, negative or ignored. 
If self._max_negatives_per_positive exists, these are then used to enforce a prespecified negative to positive ratio. Returns: mined_location_loss: a float scalar with sum of localization losses from selected hard examples. mined_cls_loss: a float scalar with sum of classification losses from selected hard examples. Raises: ValueError: if location_losses, cls_losses and decoded_boxlist_list do not have compatible shapes (i.e., they must correspond to the same number of images). ValueError: if match_list is specified but its length does not match len(decoded_boxlist_list). """ mined_location_losses = [] mined_cls_losses = [] location_losses = tf.unstack(location_losses) cls_losses = tf.unstack(cls_losses) num_images = len(decoded_boxlist_list) if not match_list: match_list = num_images * [None] if not len(location_losses) == len(decoded_boxlist_list) == len(cls_losses): raise ValueError('location_losses, cls_losses and decoded_boxlist_list ' 'do not have compatible shapes.') if not isinstance(match_list, list): raise ValueError('match_list must be a list.') if len(match_list) != len(decoded_boxlist_list): raise ValueError('match_list must either be None or have ' 'length=len(decoded_boxlist_list).') num_positives_list = [] num_negatives_list = [] for ind, detection_boxlist in enumerate(decoded_boxlist_list): box_locations = detection_boxlist.get() match = match_list[ind] image_losses = cls_losses[ind] if self._loss_type == 'loc': image_losses = location_losses[ind] elif self._loss_type == 'both': image_losses *= self._cls_loss_weight image_losses += location_losses[ind] * self._loc_loss_weight if self._num_hard_examples is not None: num_hard_examples = self._num_hard_examples else: num_hard_examples = detection_boxlist.num_boxes() with tf.device('/CPU:0'): selected_indices = tf.image.non_max_suppression( box_locations, image_losses, num_hard_examples, self._iou_threshold) if self._max_negatives_per_positive is not None and match: (selected_indices, num_positives, num_negatives) = self._subsample_selection_to_desired_neg_pos_ratio( selected_indices, match, self._max_negatives_per_positive, self._min_negatives_per_image) num_positives_list.append(num_positives) num_negatives_list.append(num_negatives) mined_location_losses.append( tf.reduce_sum(tf.gather(location_losses[ind], selected_indices))) mined_cls_losses.append( tf.reduce_sum(tf.gather(cls_losses[ind], selected_indices))) location_loss = tf.reduce_sum(tf.stack(mined_location_losses)) cls_loss = tf.reduce_sum(tf.stack(mined_cls_losses)) if match and self._max_negatives_per_positive: self._num_positives_list = num_positives_list self._num_negatives_list = num_negatives_list return (location_loss, cls_loss) def summarize(self): """Summarize the number of positives and negatives after mining.""" if self._num_positives_list and self._num_negatives_list: avg_num_positives = tf.reduce_mean(tf.to_float(self._num_positives_list)) avg_num_negatives = tf.reduce_mean(tf.to_float(self._num_negatives_list)) tf.summary.scalar('HardExampleMiner/NumPositives', avg_num_positives) tf.summary.scalar('HardExampleMiner/NumNegatives', avg_num_negatives) def _subsample_selection_to_desired_neg_pos_ratio(self, indices, match, max_negatives_per_positive, min_negatives_per_image=0): """Subsample a collection of selected indices to a desired neg:pos ratio. 
This function takes a subset of M indices (indexing into a large anchor collection of N anchors where M<N) which are labeled as positive/negative via a Match object (matched indices are positive, unmatched indices are negative). It returns a subset of the provided indices retaining all positives as well as up to the first K negatives, where: K=floor(num_negative_per_positive * num_positives). For example, if indices=[2, 4, 5, 7, 9, 10] (indexing into 12 anchors), with positives=[2, 5] and negatives=[4, 7, 9, 10] and num_negatives_per_positive=1, then the returned subset of indices is [2, 4, 5, 7]. Args: indices: An integer tensor of shape [M] representing a collection of selected anchor indices match: A matcher.Match object encoding the match between anchors and groundtruth boxes for a given image, with rows of the Match objects corresponding to groundtruth boxes and columns corresponding to anchors. max_negatives_per_positive: (float) maximum number of negatives for each positive anchor. min_negatives_per_image: minimum number of negative anchors for a given image. Allow sampling negatives in image without any positive anchors. Returns: selected_indices: An integer tensor of shape [M'] representing a collection of selected anchor indices with M' <= M. num_positives: An integer tensor representing the number of positive examples in selected set of indices. num_negatives: An integer tensor representing the number of negative examples in selected set of indices. """ positives_indicator = tf.gather(match.matched_column_indicator(), indices) negatives_indicator = tf.gather(match.unmatched_column_indicator(), indices) num_positives = tf.reduce_sum(tf.to_int32(positives_indicator)) max_negatives = tf.maximum(min_negatives_per_image, tf.to_int32(max_negatives_per_positive * tf.to_float(num_positives))) topk_negatives_indicator = tf.less_equal( tf.cumsum(tf.to_int32(negatives_indicator)), max_negatives) subsampled_selection_indices = tf.where( tf.logical_or(positives_indicator, topk_negatives_indicator)) num_negatives = tf.size(subsampled_selection_indices) - num_positives return (tf.reshape(tf.gather(indices, subsampled_selection_indices), [-1]), num_positives, num_negatives)
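The docstring of _subsample_selection_to_desired_neg_pos_ratio above walks through a concrete case (indices=[2, 4, 5, 7, 9, 10], positives=[2, 5], one negative per positive). A small NumPy sketch reproducing that worked example outside of TensorFlow:

import numpy as np

indices = np.array([2, 4, 5, 7, 9, 10])
is_positive = np.array([True, False, True, False, False, False])  # anchors 2 and 5 are matched
max_negatives_per_positive = 1.0
num_positives = is_positive.sum()
max_negatives = int(max_negatives_per_positive * num_positives)   # K = 2
keep_negative = np.cumsum(~is_positive) <= max_negatives          # keep only the first K negatives
selected = indices[is_positive | keep_negative]
print(selected)  # [2 4 5 7]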
PaddlePaddle/LanguageModeling/BERT/data
data
__init__
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/model
model
auto_arima
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

_target_: models.stat_models.AutoARIMA

defaults:
  - _self_
  - /trainer@_global_/trainer: stattrainer
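A hedged sketch of how a Hydra config with a _target_ entry such as the one above is typically turned into an object; it assumes hydra-core is installed and that the repository's models package is importable:

from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.create({"_target_": "models.stat_models.AutoARIMA"})
model = instantiate(cfg)   # roughly equivalent to models.stat_models.AutoARIMA()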
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph
graph
stats
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from copy import deepcopy from operator import itemgetter import numpy as np from syngen.analyzer.graph.graph import safeSNAP def get_normalised_cdf(nodes, cdf_points=100, debug=False): unique_nodes, unique_nodes_counts = np.unique(nodes, return_counts=True) node_degree, node_degree_counts = np.unique( unique_nodes_counts, return_counts=True ) if debug: print( "unique_nodes,unique_nodes_counts", unique_nodes, unique_nodes_counts, ) print( "node_degree,node_degree_counts", node_degree, node_degree_counts ) node_degree_normalized = ( node_degree / node_degree[-1] ) # they are sorted, so [-1] is max node_degree_counts_normalized = node_degree_counts / np.sum( node_degree_counts ) # to have density if debug: print( "node_degree_normalized,node_degree_counts_normalized", node_degree_normalized, node_degree_counts_normalized, ) plt.plot(node_degree_normalized, node_degree_counts_normalized) plt.yscale("log") plt.xscale("log") plt.title("DD normalized log-log") plt.show() F = np.cumsum(node_degree_counts_normalized) cdf_points_for_F = (np.logspace(0, 1, num=cdf_points) - 1) / 9 F_normalized = np.zeros(shape=(cdf_points_for_F.shape[0], 2)) F_normalized[:, 0] = cdf_points_for_F for i, p in enumerate(cdf_points_for_F): matching_list = F[node_degree_normalized <= p] F_normalized[i, 1] = matching_list[-1] if len(matching_list) else 0.0 if debug: print("F_normalized", F_normalized) plt.plot(F_normalized[:, 0], F_normalized[:, 1]) plt.plot(node_degree_normalized, F) plt.yscale("log") plt.xscale("log") plt.title("Normalized CDF of DD normalized log-log ") plt.show() return F_normalized # Global stats @safeSNAP def get_global_stats(G, *args, **kwargs): is_directed = G.is_directed G = G.snapGraph num_nodes = G.GetNodes() num_edges = G.GetEdges() density = num_edges / ((num_nodes - 1) * num_nodes) if num_nodes > 1 else 0 if not is_directed: density = 2 * density average_degree = num_edges / num_nodes if num_nodes else 0 self_loops = G.CntSelfEdges() zero_degrees = num_nodes - G.CntNonZNodes() zero_in_degrees = len( [item.GetVal2() for item in G.GetNodeInDegV() if item.GetVal2() == 0] ) zero_out_degrees = len( [item.GetVal2() for item in G.GetNodeOutDegV() if item.GetVal2() == 0] ) uniq_bidirectional = G.CntUniqBiDirEdges() uniq_undirected = G.CntUniqUndirEdges() uniq_directed = G.CntUniqDirEdges() return { "Nodes": num_nodes, "Edges": num_edges, "Density": around(density, 4), "Average degree": around(average_degree, 2), "Zero deg nodes": zero_degrees, "Zero in deg nodes": zero_in_degrees, "Zero out deg nodes": zero_out_degrees, "Self loops": self_loops, "Bidirectional edges": uniq_bidirectional, "Unique undirected edges": uniq_undirected, "Unique directed edges": uniq_directed, } # Connectivity @safeSNAP def get_connectivity(G, *args, **kwargs): is_directed = G.is_directed G = G.snapGraph def get_stats(component_dist_snap): component_dist = [ (comp.GetVal1(), comp.GetVal2()) for comp in component_dist_snap ] if 
len(component_dist): largest_component = max(component_dist, key=itemgetter(0))[0] else: largest_component = 0 number_of_components = sum( num_component for size, num_component in component_dist ) percent = 100 * largest_component / G.GetNodes() return number_of_components, percent # Weakly connected components number_of_weak_components, percent_of_weak = get_stats(G.GetWccSzCnt()) is_weakly_connected = number_of_weak_components == 1 if is_directed: # Strongly connected components number_of_strong_components, percent_of_strong = get_stats( G.GetSccSzCnt() ) is_strongly_connected = number_of_strong_components == 1 result = { "Is strongly connected": is_strongly_connected, "Is weakly connected": is_weakly_connected, "Number of strongly connected components": number_of_strong_components, "Percent of nodes in largest strongly connected component": around( percent_of_strong ), "Number of weakly connected components": number_of_weak_components, "Percent of nodes in largest weakly connected component": around( percent_of_weak ), } else: result = { "Is connected": is_weakly_connected, "Number of connected components": number_of_weak_components, "Percent of nodes in largest component": around(percent_of_weak), } return result # Cluster coefficient and triangles @safeSNAP def get_transitivity(G, fast=True, *args, **kwargs): G = G.snapGraph results_dict = {} if fast: samples = min(G.GetNodes(), int(1e3)) results_dict["Clustering coefficient"] = G.GetClustCf(samples) else: cc, ct, op = G.GetClustCfAll()[0] results_dict = { "Clustering coefficient": cc, "Number of closed triangles": ct, "Number of open triangles": op, } return results_dict # Distances info @safeSNAP def get_path_stats(G, *args, **kwargs): is_directed = G.is_directed G = G.snapGraph # Only effective diameter if BFS will be too slow or not accurate # approx_eff_diam = G.GetAnfEffDiam() num_test_nodes = max(100, G.GetNodes() // 1000) approx_eff_diam, _, approx_diam, average_path_length = G.GetBfsEffDiamAll( num_test_nodes, is_directed ) return { "90% effective diameter": around(approx_eff_diam), "Approx. full diameter": approx_diam, "Average shortest path length": around(average_path_length), } # Degree similarity def get_dd_simmilarity_score(edges_original, edges_synthetic, cdf_points=1000): F_normalized_original = get_normalised_cdf( edges_original, cdf_points=cdf_points, debug=False ) F_normalized_synthetic = get_normalised_cdf( edges_synthetic, cdf_points=cdf_points, debug=False ) abs_F = np.abs(F_normalized_original[:, 1] - F_normalized_synthetic[:, 1]) where_non_zero = F_normalized_original[:, 1] != 0 error = np.average( np.divide( abs_F[where_non_zero], F_normalized_original[:, 1][where_non_zero] ) ) # average error of normalized CDFs error = min(error, 1) if error < 0: raise ValueError("Negative values in CDFs!") simmilarity_score = 1.0 - error return simmilarity_score def around(number, decimals=2): return np.around(number, decimals)
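A minimal usage sketch of the degree-distribution similarity score defined above, with synthetic edge-endpoint arrays (hypothetical data, not from a real graph); two samples drawn from the same distribution should score close to 1.0:

import numpy as np

rng = np.random.default_rng(0)
edges_original = rng.integers(0, 100, size=10_000)    # node ids of edge endpoints
edges_synthetic = rng.integers(0, 100, size=10_000)
score = get_dd_simmilarity_score(edges_original, edges_synthetic, cdf_points=100)
print(round(score, 3))   # close to 1.0 for matching degree distributions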
PyTorch/SpeechSynthesis/FastPitch/scripts
scripts
inference_benchmark
#!/usr/bin/env bash

set -a

: ${FILELIST:="phrases/benchmark_8_128.tsv"}
: ${OUTPUT_DIR:="./output/audio_$(basename ${FILELIST} .tsv)"}
: ${TORCHSCRIPT:=true}
: ${BS_SEQUENCE:="1 4 8"}
: ${WARMUP:=64}
: ${REPEATS:=500}
: ${AMP:=false}
: ${CUDNN_BENCHMARK:=true}

for BATCH_SIZE in $BS_SEQUENCE ; do
    LOG_FILE="$OUTPUT_DIR"/perf-infer_amp-${AMP}_bs${BATCH_SIZE}.json
    bash scripts/inference_example.sh "$@"
done
PyTorch/Recommendation/NCF/data/csv_conversion
csv_conversion
feature_spec
feature_spec:
  user:
    cardinality: auto
  item:
    cardinality: auto
  label:

metadata:
  test_samples_per_series: 3

source_spec:
  train:
    - type: csv
      features: #Each line corresponds to a column in the csv files
        - user
        - item
        - label
      files:
        - train_data_1.csv # we assume no header
        - train_data_2.csv
  test:
    - type: csv
      features:
        - user
        - item
        - label
      files:
        - test_data_1.csv

channel_spec:
  user_ch: # Channel names are model-specific magics (in this model, neumf_constants.py)
    - user
  item_ch:
    - item
  label_ch:
    - label

# Requirements:
# We assume the ids supplied have already been factorized into 0...N
# In the mapping to be used for validation and testing, candidates for each series (each user) appear consecutively.
# Each series has the same number of items: metadata['test_samples_per_series']
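A small sketch of consuming this spec from Python; the on-disk file name is assumed to be feature_spec.yaml and only PyYAML is required:

import yaml

with open("feature_spec.yaml") as f:        # hypothetical file name for the spec above
    spec = yaml.safe_load(f)
print(spec["channel_spec"]["user_ch"])               # ['user']
print(spec["metadata"]["test_samples_per_series"])   # 3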
CUDA-Optimized/FastSpeech/scripts/docker
docker
build
#!/bin/bash

docker build . --rm -t fastspeech
# docker build . --build-arg UID=$(id -u) --build-arg GID=$(id -g) --build-arg UNAME=$(id -un) --rm -t fastspeech
CUDA-Optimized/FastSpeech/fastspeech/trt
trt
waveglow_trt_inferencer
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import sys import torch import tensorrt as trt from fastspeech.trt import TRT_BASE_PATH, TRT_LOGGER import fastspeech.trt.common as common from fastspeech.utils.logging import tprint from fastspeech.utils.pytorch import to_cpu_numpy, to_gpu_async from fastspeech.inferencer.waveglow_inferencer import WaveGlowInferencer from fastspeech.inferencer.denoiser import Denoiser import pycuda.driver as cuda class WaveGlowTRTInferencer(object): def __init__(self, ckpt_file, engine_file, use_fp16=False, use_denoiser=False, stride=256, n_groups=8): self.ckpt_file = ckpt_file self.engine_file = engine_file self.use_fp16 = use_fp16 self.use_denoiser = use_denoiser self.stride = stride self.n_groups = n_groups if self.use_denoiser: sys.path.append('waveglow') waveglow = torch.load(self.ckpt_file)['model'] waveglow = waveglow.remove_weightnorm(waveglow) waveglow.eval() self.denoiser = Denoiser(waveglow) self.denoiser = to_gpu_async(self.denoiser) tprint('Using WaveGlow denoiser.') # after initialization, we don't need WaveGlow PyTorch checkpoint # anymore - deleting del waveglow torch.cuda.empty_cache() # load engine with open(self.engine_file, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime: self.engine = runtime.deserialize_cuda_engine(f.read()) if self.engine: tprint('TRT Engine Loaded from {} successfully.'.format(self.engine_file)) return else: tprint('Loading TRT Engine from {} failed.'.format(self.engine_file)) def __enter__(self): self.context = self.engine.create_execution_context() def __exit__(self, exception_type, exception_value, traceback): self.context.__del__() self.engine.__del__() def infer(self, mels): batch_size, _, mel_size = mels.shape mels = mels.unsqueeze(3) z = torch.randn(batch_size, self.n_groups, mel_size * self.stride // self.n_groups, 1) wavs = torch.zeros(batch_size, mel_size * self.stride) if self.use_fp16: z = z.half() mels = mels.half() wavs = wavs.half() mels = to_gpu_async(mels) z = to_gpu_async(z) wavs = to_gpu_async(wavs) # create inputs/outputs buffers input_buffers = 
common.create_inputs_from_torch(self.engine, [mels, z]) output_buffers = common.create_outputs_from_torch(self.engine, [wavs.shape]) # set shapes of inputs self.context = common.set_input_shapes(self.engine, self.context, input_buffers) # execute stream = cuda.Stream() bindings = [int(data.data_ptr()) for data in (input_buffers + output_buffers)] self.context.execute_async_v2(bindings=bindings, stream_handle=stream.handle) stream.synchronize() wavs = output_buffers[0] # denoise if self.use_denoiser: wavs = self.denoiser(wavs, strength=0.01) return wavs.float()
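For illustration only, here is a minimal usage sketch of the WaveGlowTRTInferencer above. The checkpoint/engine paths and the mel-spectrogram shape are hypothetical placeholders, and a CUDA context (e.g. via pycuda) is assumed to be initialized elsewhere, as the surrounding repository normally does.

import torch

# Hypothetical paths -- substitute real checkpoint/engine files.
inferencer = WaveGlowTRTInferencer(ckpt_file='waveglow_ckpt.pt',
                                   engine_file='waveglow.engine',
                                   use_fp16=True)
with inferencer:                    # __enter__ creates the TRT execution context
    mels = torch.randn(1, 80, 620)  # (batch, n_mel_channels, mel frames), made-up shape
    wavs = inferencer.infer(mels)   # float32 waveform tensor on the GPU
print(wavs.shape)                   # roughly (1, mel_frames * stride) samples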
PyTorch/SpeechSynthesis/Tacotron2/platform
platform
DGXA100_waveglow_AMP_4NGPU_train
mkdir -p output python -m multiproc train.py -m WaveGlow -o output/ --amp -lr 1e-4 --epochs 1001 -bs 10 --segment-length 8000 --weight-decay 0 --grad-clip-thresh 65504.0 --cudnn-benchmark --cudnn-enabled --log-file nvlog.json
CUDA-Optimized/FastSpeech/tacotron2
tacotron2
hparams
# BSD 3-Clause License # Copyright (c) 2018-2020, NVIDIA Corporation # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """https://github.com/NVIDIA/tacotron2""" from fastspeech.text_norm import symbols class Hparams: """ hyper parameters """ def __init__(self): ################################ # Experiment Parameters # ################################ self.epochs = 500 self.iters_per_checkpoint = 1000 self.seed = 1234 self.dynamic_loss_scaling = True self.fp16_run = False self.distributed_run = False self.dist_backend = "nccl" self.dist_url = "tcp://localhost:54321" self.cudnn_enabled = True self.cudnn_benchmark = False self.ignore_layers = ['embedding.weight'] ################################ # Data Parameters # ################################ self.load_mel_from_disk = False self.training_files = 'filelists/ljs_audio_text_train_filelist.txt' self.validation_files = 'filelists/ljs_audio_text_val_filelist.txt' self.text_cleaners = ['english_cleaners'] ################################ # Audio Parameters # ################################ self.max_wav_value = 32768.0 self.sampling_rate = 22050 self.filter_length = 1024 self.hop_length = 256 self.win_length = 1024 self.n_mel_channels = 80 self.mel_fmin = 0.0 self.mel_fmax = 8000.0 ################################ # Model Parameters # ################################ self.n_symbols = len(symbols) self.symbols_embedding_dim = 512 # Encoder parameters self.encoder_kernel_size = 5 self.encoder_n_convolutions = 3 self.encoder_embedding_dim = 512 # Decoder parameters self.n_frames_per_step = 1 # currently only 1 is supported self.decoder_rnn_dim = 1024 self.prenet_dim = 256 self.max_decoder_steps = 1000 self.gate_threshold = 0.5 self.p_attention_dropout = 0.1 self.p_decoder_dropout = 0.1 # Attention parameters self.attention_rnn_dim = 1024 self.attention_dim = 128 # Location Layer parameters self.attention_location_n_filters = 32 self.attention_location_kernel_size = 31 # Mel-post processing network parameters self.postnet_embedding_dim = 512 self.postnet_kernel_size = 5 self.postnet_n_convolutions = 5 ################################ # 
Optimization Hyperparameters # ################################ self.use_saved_learning_rate = False self.learning_rate = 1e-3 self.weight_decay = 1e-6 self.grad_clip_thresh = 1.0 self.batch_size = 64 self.mask_padding = True # set model's padded outputs to padded values def return_self(self): return self def create_hparams(): hparams = Hparams() return hparams.return_self()
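As a small sketch of how the hyper-parameter container above can be used, the snippet below instantiates it and overrides a few fields; the overridden values are arbitrary examples, not recommended settings.

hparams = create_hparams()
# Override a few fields for a short debugging run (values are arbitrary).
hparams.epochs = 10
hparams.batch_size = 32
hparams.fp16_run = True
print(hparams.n_mel_channels, hparams.sampling_rate)  # 80 22050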
TensorFlow2/Detection/Efficientdet/model
model
dataloader
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Data loader and processing.""" from absl import logging import multiprocessing import tensorflow as tf import horovod.tensorflow as hvd from utils import model_utils from model import anchors from object_detection import preprocessor from object_detection import tf_example_decoder from utils.horovod_utils import get_rank, get_world_size from utils.util_keras import get_mixed_precision_policy class InputProcessor: """Base class of Input processor.""" def __init__(self, image, output_size): """Initializes a new `InputProcessor`. Args: image: The input image before processing. output_size: The output image size after calling resize_and_crop_image function. """ self._image = image if isinstance(output_size, int): self._output_size = (output_size, output_size) else: self._output_size = output_size # Parameters to control rescaling and shifting during preprocessing. # Image scale defines scale from original image to scaled image. self._image_scale = tf.constant(1.0) # The integer height and width of scaled image. self._scaled_height = tf.shape(image)[0] self._scaled_width = tf.shape(image)[1] # The x and y translation offset to crop scaled image to the output size. self._crop_offset_y = tf.constant(0) self._crop_offset_x = tf.constant(0) def normalize_image(self, dtype=tf.float32): """Normalize the image to zero mean and unit variance.""" # The image normalization is identical to Cloud TPU ResNet. self._image = tf.image.convert_image_dtype(self._image, dtype=dtype) offset = tf.constant([0.485, 0.456, 0.406], dtype=dtype) offset = tf.expand_dims(offset, axis=0) offset = tf.expand_dims(offset, axis=0) self._image -= offset scale = tf.constant([0.229, 0.224, 0.225], dtype=dtype) scale = tf.expand_dims(scale, axis=0) scale = tf.expand_dims(scale, axis=0) self._image /= scale def get_image(self): return self._image def set_training_random_scale_factors(self, scale_min, scale_max, target_size=None): """Set the parameters for multiscale training. Notably, if train and eval use different sizes, then target_size should be set as eval size to avoid the discrency between train and eval. Args: scale_min: minimal scale factor. scale_max: maximum scale factor. target_size: targeted size, usually same as eval. If None, use train size. """ if not target_size: target_size = self._output_size target_size = model_utils.parse_image_size(target_size) logging.info('target_size = %s, output_size = %s', target_size, self._output_size) # Select a random scale factor. random_scale_factor = tf.random.uniform([], scale_min, scale_max) scaled_y = tf.cast(random_scale_factor * target_size[0], tf.int32) scaled_x = tf.cast(random_scale_factor * target_size[1], tf.int32) # Recompute the accurate scale_factor using rounded scaled image size. 
height = tf.cast(tf.shape(self._image)[0], tf.float32) width = tf.cast(tf.shape(self._image)[1], tf.float32) image_scale_y = tf.cast(scaled_y, tf.float32) / height image_scale_x = tf.cast(scaled_x, tf.float32) / width image_scale = tf.minimum(image_scale_x, image_scale_y) # Select non-zero random offset (x, y) if scaled image is larger than # self._output_size. scaled_height = tf.cast(height * image_scale, tf.int32) scaled_width = tf.cast(width * image_scale, tf.int32) offset_y = tf.cast(scaled_height - self._output_size[0], tf.float32) offset_x = tf.cast(scaled_width - self._output_size[1], tf.float32) offset_y = tf.maximum(0.0, offset_y) * tf.random.uniform([], 0, 1) offset_x = tf.maximum(0.0, offset_x) * tf.random.uniform([], 0, 1) offset_y = tf.cast(offset_y, tf.int32) offset_x = tf.cast(offset_x, tf.int32) self._image_scale = image_scale self._scaled_height = scaled_height self._scaled_width = scaled_width self._crop_offset_x = offset_x self._crop_offset_y = offset_y def set_scale_factors_to_output_size(self): """Set the parameters to resize input image to self._output_size.""" # Compute the scale_factor using rounded scaled image size. height = tf.cast(tf.shape(self._image)[0], tf.float32) width = tf.cast(tf.shape(self._image)[1], tf.float32) image_scale_y = tf.cast(self._output_size[0], tf.float32) / height image_scale_x = tf.cast(self._output_size[1], tf.float32) / width image_scale = tf.minimum(image_scale_x, image_scale_y) scaled_height = tf.cast(height * image_scale, tf.int32) scaled_width = tf.cast(width * image_scale, tf.int32) self._image_scale = image_scale self._scaled_height = scaled_height self._scaled_width = scaled_width def resize_and_crop_image(self, method=tf.image.ResizeMethod.BILINEAR): """Resize input image and crop it to the self._output dimension.""" dtype = self._image.dtype scaled_image = tf.compat.v1.image.resize( self._image, [self._scaled_height, self._scaled_width], method=method) if scaled_image.dtype != dtype: scaled_image = tf.image.convert_image_dtype(scaled_image, dtype=dtype) scaled_image = scaled_image[self._crop_offset_y:self._crop_offset_y + self._output_size[0], self._crop_offset_x:self._crop_offset_x + self._output_size[1], :] self._image = tf.image.pad_to_bounding_box(scaled_image, 0, 0, self._output_size[0], self._output_size[1]) # self._image = tf.cast(output_image, dtype) return self._image class DetectionInputProcessor(InputProcessor): """Input processor for object detection.""" def __init__(self, image, output_size, boxes=None, classes=None): InputProcessor.__init__(self, image, output_size) self._boxes = boxes self._classes = classes def random_horizontal_flip(self): """Randomly flip input image and bounding boxes.""" self._image, self._boxes = preprocessor.random_horizontal_flip( self._image, boxes=self._boxes) def clip_boxes(self, boxes): """Clip boxes to fit in an image.""" ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1) ymin = tf.clip_by_value(ymin, 0, self._output_size[0] - 1) xmin = tf.clip_by_value(xmin, 0, self._output_size[1] - 1) ymax = tf.clip_by_value(ymax, 0, self._output_size[0] - 1) xmax = tf.clip_by_value(xmax, 0, self._output_size[1] - 1) boxes = tf.stack([ymin, xmin, ymax, xmax], axis=1) return boxes def resize_and_crop_boxes(self): """Resize boxes and crop it to the self._output dimension.""" boxlist = preprocessor.box_list.BoxList(self._boxes) # boxlist is in range of [0, 1], so here we pass the scale_height/width # instead of just scale. 
boxes = preprocessor.box_list_scale(boxlist, self._scaled_height, self._scaled_width).get() # Adjust box coordinates based on the offset. box_offset = tf.stack([ self._crop_offset_y, self._crop_offset_x, self._crop_offset_y, self._crop_offset_x, ]) boxes -= tf.cast(tf.reshape(box_offset, [1, 4]), tf.float32) # Clip the boxes. boxes = self.clip_boxes(boxes) # Filter out ground truth boxes that are illegal. indices = tf.where( tf.not_equal((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]), 0)) boxes = tf.gather_nd(boxes, indices) classes = tf.gather_nd(self._classes, indices) return boxes, classes @property def image_scale(self): # Return image scale from original image to scaled image. return self._image_scale @property def image_scale_to_original(self): # Return image scale from scaled image to original image. return 1.0 / self._image_scale @property def offset_x(self): return self._crop_offset_x @property def offset_y(self): return self._crop_offset_y def pad_to_fixed_size(data, pad_value, output_shape): """Pad data to a fixed length at the first dimension. Args: data: Tensor to be padded to output_shape. pad_value: A constant value assigned to the paddings. output_shape: The output shape of a 2D tensor. Returns: The Padded tensor with output_shape [max_instances_per_image, dimension]. """ max_instances_per_image = output_shape[0] dimension = output_shape[1] data = tf.reshape(data, [-1, dimension]) num_instances = tf.shape(data)[0] msg = 'ERROR: please increase config.max_instances_per_image' with tf.control_dependencies( [tf.assert_less(num_instances, max_instances_per_image, message=msg)]): pad_length = max_instances_per_image - num_instances paddings = pad_value * tf.ones([pad_length, dimension]) padded_data = tf.concat([data, paddings], axis=0) padded_data = tf.reshape(padded_data, output_shape) return padded_data class InputReader: """Input reader for dataset.""" def __init__(self, file_pattern, is_training, use_fake_data=False, max_instances_per_image=None, enable_map_parallelization=True): self._file_pattern = file_pattern self._is_training = is_training self._use_fake_data = use_fake_data # COCO has 100 limit, but users may set different values for custom dataset. self._max_instances_per_image = max_instances_per_image or 100 self._enable_map_parallelization = enable_map_parallelization @tf.autograph.experimental.do_not_convert def dataset_parser(self, value, example_decoder, anchor_labeler, params): """Parse data to a fixed dimension input image and learning targets. Args: value: a single serialized tf.Example string. example_decoder: TF example decoder. anchor_labeler: anchor box labeler. params: a dict of extra parameters. Returns: image: Image tensor that is preprocessed to have normalized value and fixed dimension [image_height, image_width, 3] cls_targets_dict: ordered dictionary with keys [min_level, min_level+1, ..., max_level]. The values are tensor with shape [height_l, width_l, num_anchors]. The height_l and width_l represent the dimension of class logits at l-th level. box_targets_dict: ordered dictionary with keys [min_level, min_level+1, ..., max_level]. The values are tensor with shape [height_l, width_l, num_anchors * 4]. The height_l and width_l represent the dimension of bounding box regression output at l-th level. num_positives: Number of positive anchors in the image. source_id: Source image id. Default value -1 if the source id is empty in the groundtruth annotation. image_scale: Scale of the processed image to the original image. 
boxes: Groundtruth bounding box annotations. The box is represented in [y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed dimension [self._max_instances_per_image, 4]. is_crowds: Groundtruth annotations to indicate if an annotation represents a group of instances by value {0, 1}. The tensor is padded with 0 to the fixed dimension [self._max_instances_per_image]. areas: Groundtruth areas annotations. The tensor is padded with -1 to the fixed dimension [self._max_instances_per_image]. classes: Groundtruth classes annotations. The tensor is padded with -1 to the fixed dimension [self._max_instances_per_image]. """ with tf.name_scope('parser'): data = example_decoder.decode(value) source_id = data['source_id'] image = data['image'] boxes = data['groundtruth_boxes'] classes = data['groundtruth_classes'] classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1]) areas = data['groundtruth_area'] is_crowds = data['groundtruth_is_crowd'] image_masks = data.get('groundtruth_instance_masks', []) classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1]) if self._is_training: # Training time preprocessing. if params['skip_crowd_during_training']: indices = tf.where(tf.logical_not(data['groundtruth_is_crowd'])) classes = tf.gather_nd(classes, indices) boxes = tf.gather_nd(boxes, indices) input_processor = DetectionInputProcessor(image, params['image_size'], boxes, classes) input_processor.normalize_image(dtype=tf.float16 if \ params['mixed_precision'] else tf.float32) if self._is_training: if params['input_rand_hflip']: input_processor.random_horizontal_flip() input_processor.set_training_random_scale_factors( params['jitter_min'], params['jitter_max'], params.get('target_size', None)) else: input_processor.set_scale_factors_to_output_size() image = input_processor.resize_and_crop_image() boxes, classes = input_processor.resize_and_crop_boxes() # Assign anchors. (cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors(boxes, classes) source_id = tf.where( tf.equal(source_id, tf.constant('')), '-1', source_id) source_id = tf.strings.to_number(source_id) # Pad groundtruth data for evaluation. image_scale = input_processor.image_scale_to_original boxes *= image_scale is_crowds = tf.cast(is_crowds, dtype=tf.float32) boxes = pad_to_fixed_size(boxes, -1, [self._max_instances_per_image, 4]) is_crowds = pad_to_fixed_size(is_crowds, 0, [self._max_instances_per_image, 1]) areas = pad_to_fixed_size(areas, -1, [self._max_instances_per_image, 1]) classes = pad_to_fixed_size(classes, -1, [self._max_instances_per_image, 1]) if params['mixed_precision']: dtype = get_mixed_precision_policy().compute_dtype if image.dtype != dtype: image = tf.cast(image, dtype=dtype) box_targets = tf.nest.map_structure( lambda box_target: tf.cast(box_target, dtype=dtype), box_targets) return (image, cls_targets, box_targets, num_positives, source_id, image_scale, boxes, is_crowds, areas, classes, image_masks) @tf.autograph.experimental.do_not_convert def process_example(self, params, batch_size, images, cls_targets, box_targets, num_positives, source_ids, image_scales, boxes, is_crowds, areas, classes, image_masks): """Processes one batch of data.""" labels = {} # Count num_positives in a batch. 
num_positives_batch = tf.reduce_mean(num_positives) labels['mean_num_positives'] = tf.reshape( tf.tile(tf.expand_dims(num_positives_batch, 0), [ batch_size, ]), [batch_size, 1]) if params['data_format'] == 'channels_first': images = tf.transpose(images, [0, 3, 1, 2]) for level in range(params['min_level'], params['max_level'] + 1): labels['cls_targets_%d' % level] = cls_targets[level] labels['box_targets_%d' % level] = box_targets[level] if params['data_format'] == 'channels_first': labels['cls_targets_%d' % level] = tf.transpose( labels['cls_targets_%d' % level], [0, 3, 1, 2]) labels['box_targets_%d' % level] = tf.transpose( labels['box_targets_%d' % level], [0, 3, 1, 2]) # Concatenate groundtruth annotations to a tensor. groundtruth_data = tf.concat([boxes, is_crowds, areas, classes], axis=2) labels['source_ids'] = source_ids labels['groundtruth_data'] = groundtruth_data labels['image_scales'] = image_scales labels['image_masks'] = image_masks return images, labels @property def dataset_options(self): options = tf.data.Options() options.experimental_deterministic = not self._is_training options.experimental_optimization.map_parallelization = self._enable_map_parallelization options.experimental_optimization.parallel_batch = True options.threading.private_threadpool_size = max(2, (multiprocessing.cpu_count() // hvd.local_size()) - 2) return options def __call__(self, params, input_context=None, batch_size=None): input_anchors = anchors.Anchors(params['min_level'], params['max_level'], params['num_scales'], params['aspect_ratios'], params['anchor_scale'], params['image_size']) anchor_labeler = anchors.AnchorLabeler(input_anchors, params['num_classes']) example_decoder = tf_example_decoder.TfExampleDecoder( include_mask='segmentation' in params['heads'], regenerate_source_id=params['regenerate_source_id'] ) batch_size = batch_size or params['batch_size'] dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False) if self._is_training: dataset = dataset.shard(get_world_size(), get_rank()) dataset.shuffle(buffer_size=1024) # Prefetch data from files. def _prefetch_dataset(filename): if params.get('dataset_type', None) == 'sstable': pass else: dataset = tf.data.TFRecordDataset(filename).prefetch(1) return dataset dataset = dataset.interleave( _prefetch_dataset, cycle_length=10, block_length=16, num_parallel_calls=tf.data.experimental.AUTOTUNE) dataset = dataset.with_options(self.dataset_options) if self._is_training: dataset = dataset.shuffle(params['shuffle_buffer']) # Parse the fetched records to input tensors for model function. # pylint: disable=g-long-lambda if params.get('dataset_type', None) == 'sstable': map_fn = lambda key, value: self.dataset_parser(value, example_decoder, anchor_labeler, params) else: map_fn = lambda value: self.dataset_parser(value, example_decoder, anchor_labeler, params) # pylint: enable=g-long-lambda dataset = dataset.map( map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) # dataset = dataset.prefetch(batch_size) dataset = dataset.batch(batch_size, drop_remainder=params['drop_remainder']) dataset = dataset.map( lambda *args: self.process_example(params, batch_size, *args)) dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE) if self._is_training: dataset = dataset.repeat() if self._use_fake_data: # Turn this dataset into a semi-fake dataset which always loop at the # first batch. This reduces variance in performance and is useful in # testing. dataset = dataset.take(1).cache().repeat() return dataset
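To make the padding helper defined in the dataloader above concrete, here is a tiny sketch that pads a toy box tensor to the fixed [max_instances_per_image, 4] shape used for evaluation; the tensor values are made up.

import tensorflow as tf

boxes = tf.constant([[0., 0., 10., 10.],
                     [5., 5., 20., 20.]])   # two ground-truth boxes
padded = pad_to_fixed_size(boxes, pad_value=-1, output_shape=[100, 4])
print(padded.shape)   # (100, 4); rows 2..99 are filled with -1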
TensorFlow2/Recommendation/WideAndDeep/scripts
scripts
training_full
#!/bin/bash # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e usage() { cat <<EOF Usage: bash scripts/training_full.sh -g gpu -g | --gpu (Required) Number of GPUs -a | --amp (Optional) Use amp -x | --xla (Optional) Use xla EOF } if [ ! -d "scripts" ] || [ ! "$(ls -A 'scripts')" ]; then echo "You are probably calling this script from the wrong directory" usage exit 1 fi amp= xla= gpu= while [ "$1" != "" ]; do case $1 in -g | --gpu) shift gpu="$1" ;; -a | --amp) amp="--amp" ;; -x | --xla) xla="--xla" ;; *) usage exit 1 ;; esac shift done if [ -z "$gpu" ]; then echo "Missing number of GPUs parameter" usage exit 1 fi if ! [ "$gpu" -ge 0 ] || [[ ! "$gpu" =~ ^(1|4|8)$ ]] 2>/dev/null; then echo "Expected number of GPUs (${gpu}) to equal 1, 4 or 8" usage exit 1 fi cmd="horovodrun -np ${gpu} sh hvd_wrapper.sh \ python main.py \ ${amp} \ ${xla}" set -x $cmd

TensorFlow/Detection/SSD/models/research/slim/datasets
datasets
dataset_utils
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains utilities for downloading and converting datasets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import tarfile from six.moves import urllib import tensorflow as tf LABELS_FILENAME = 'labels.txt' def int64_feature(values): """Returns a TF-Feature of int64s. Args: values: A scalar or list of values. Returns: A TF-Feature. """ if not isinstance(values, (tuple, list)): values = [values] return tf.train.Feature(int64_list=tf.train.Int64List(value=values)) def bytes_feature(values): """Returns a TF-Feature of bytes. Args: values: A string. Returns: A TF-Feature. """ return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values])) def float_feature(values): """Returns a TF-Feature of floats. Args: values: A scalar of list of values. Returns: A TF-Feature. """ if not isinstance(values, (tuple, list)): values = [values] return tf.train.Feature(float_list=tf.train.FloatList(value=values)) def image_to_tfexample(image_data, image_format, height, width, class_id): return tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': bytes_feature(image_data), 'image/format': bytes_feature(image_format), 'image/class/label': int64_feature(class_id), 'image/height': int64_feature(height), 'image/width': int64_feature(width), })) def download_and_uncompress_tarball(tarball_url, dataset_dir): """Downloads the `tarball_url` and uncompresses it locally. Args: tarball_url: The URL of a tarball file. dataset_dir: The directory where the temporary files are stored. """ filename = tarball_url.split('/')[-1] filepath = os.path.join(dataset_dir, filename) def _progress(count, block_size, total_size): sys.stdout.write('\r>> Downloading %s %.1f%%' % ( filename, float(count * block_size) / float(total_size) * 100.0)) sys.stdout.flush() filepath, _ = urllib.request.urlretrieve(tarball_url, filepath, _progress) print() statinfo = os.stat(filepath) print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') tarfile.open(filepath, 'r:gz').extractall(dataset_dir) def write_label_file(labels_to_class_names, dataset_dir, filename=LABELS_FILENAME): """Writes a file with the list of class names. Args: labels_to_class_names: A map of (integer) labels to class names. dataset_dir: The directory in which the labels file should be written. filename: The filename where the class names are written. """ labels_filename = os.path.join(dataset_dir, filename) with tf.gfile.Open(labels_filename, 'w') as f: for label in labels_to_class_names: class_name = labels_to_class_names[label] f.write('%d:%s\n' % (label, class_name)) def has_labels(dataset_dir, filename=LABELS_FILENAME): """Specifies whether or not the dataset directory contains a label map file. Args: dataset_dir: The directory in which the labels file is found. 
filename: The filename where the class names are written. Returns: `True` if the labels file exists and `False` otherwise. """ return tf.gfile.Exists(os.path.join(dataset_dir, filename)) def read_label_file(dataset_dir, filename=LABELS_FILENAME): """Reads the labels file and returns a mapping from ID to class name. Args: dataset_dir: The directory in which the labels file is found. filename: The filename where the class names are written. Returns: A map from a label (integer) to class name. """ labels_filename = os.path.join(dataset_dir, filename) with tf.gfile.Open(labels_filename, 'rb') as f: lines = f.read().decode() lines = lines.split('\n') lines = filter(None, lines) labels_to_class_names = {} for line in lines: index = line.index(':') labels_to_class_names[int(line[:index])] = line[index+1:] return labels_to_class_names
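A small round-trip sketch for the label-file helpers above; the class mapping and directory are illustrative only, and the directory is assumed to exist (note that this module targets the TF1-style tf.gfile API).

labels_to_class_names = {0: 'cat', 1: 'dog'}
dataset_dir = '/tmp/my_dataset'              # hypothetical directory (must exist)
write_label_file(labels_to_class_names, dataset_dir)
if has_labels(dataset_dir):
    print(read_label_file(dataset_dir))      # {0: 'cat', 1: 'dog'}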
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/analyzer/graph
graph
analyser
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import matplotlib.pyplot as plt import pandas as pd from syngen.analyzer.graph.plotting import ( plot_clustering_coef_distribution, plot_degree_distribution, plot_eigenvalue_histogram_distribution, plot_eigenvalue_rank_distribution, plot_hopplot, plot_in_degree_distribution, plot_leading_singular_vector_rank, plot_out_degree_distribution, plot_singular_value_histogram_distribution, plot_singular_value_rank_distribution, plot_strongly_connected_component_distribution, plot_weakly_connected_component_distribution, ) from syngen.analyzer.graph.stats import ( get_connectivity, get_global_stats, get_path_stats, get_transitivity, ) from syngen.analyzer.graph.utils import timed class AnalysisModule: @staticmethod def check_assertions(graphs): assert len(graphs), "Expected at least 1 graph" assert ( len(set([graph.is_directed for graph in graphs])) == 1 ), "All graphs have to be directed or undirected" @staticmethod def maybe_wrap_timer(f, timer, title): return timed(f, title) if timer else f def compare_graph_stats( self, *graphs, global_stats=True, connectivity=True, transitivity=True, path_stats=True, timer=False, fast=True, ): self.check_assertions(graphs) results = [] category_functions = [] if global_stats: category_functions.append(("Global stats", get_global_stats)) if connectivity: category_functions.append(("Connectivity", get_connectivity)) if transitivity: category_functions.append(("Transitivity", get_transitivity)) if path_stats: category_functions.append(("Path stats", get_path_stats)) for category, F in category_functions: start = time.perf_counter() stats = [F(G, fast=fast) for G in graphs] parsed = [ tuple( [category, statistic] + [graph_stats[statistic] for graph_stats in stats] ) for statistic in stats[0] ] results += parsed if timer: elapsed = time.perf_counter() - start print(f'Category "{category}" took {elapsed:.2f}s') names = [ graph.name if graph.name else f"G{i}" for i, graph in enumerate(graphs, 1) ] columns = ["Category", "Statistic"] + names return pd.DataFrame(results, columns=columns) def compare_graph_plots(self, *graphs, hop_plot_iters=128, timer=False): self.check_assertions(graphs) is_directed = graphs[0].is_directed if is_directed: fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2) ax1, ax2, ax3, ax4 = ax3, ax4, ax1, ax2 fig.set_size_inches(18, 6 * 2, forward=True) else: fig, (ax1, ax2) = plt.subplots(1, 2) fig.set_size_inches(18, 6, forward=True) pdd = self.maybe_wrap_timer( plot_degree_distribution, timer, "Degree distribution" ) pidd = self.maybe_wrap_timer( plot_in_degree_distribution, timer, "In degree distribution" ) podd = self.maybe_wrap_timer( plot_out_degree_distribution, timer, "Out degree distribution" ) ph = self.maybe_wrap_timer(plot_hopplot, timer, "Hop plot") if is_directed: pidd(ax3, *graphs) podd(ax4, *graphs) pdd(ax1, *graphs) ph(ax2, *graphs, hop_plot_iters=hop_plot_iters) return fig def compare_graph_dd(self, *graphs, timer=False): 
self.check_assertions(graphs) fig, ax1 = plt.subplots(1, 1) fig.set_size_inches(18.5, 10.5, forward=True) pdd = ( timed(plot_degree_distribution, "Degree distribution") if timer else plot_degree_distribution ) pdd(ax1, *graphs) return fig
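A hedged usage sketch of the AnalysisModule above: it assumes two graph objects that already exist and expose the .is_directed and .name attributes checked by check_assertions() (for example, graphs produced elsewhere in the syngen toolkit); the variable and file names are placeholders.

analysis = AnalysisModule()
# `original_graph` and `synthetic_graph` are assumed to already exist and to
# expose the .is_directed / .name attributes required by check_assertions().
stats_df = analysis.compare_graph_stats(original_graph, synthetic_graph,
                                        timer=True, fast=True)
print(stats_df)
fig = analysis.compare_graph_dd(original_graph, synthetic_graph)
fig.savefig('degree_distributions.png')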
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/perf_analyzer
perf_analyzer
perf_config
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any, Dict from .exceptions import PerfAnalyzerException class PerfAnalyzerConfig: """ A config class to set arguments to the perf_analyzer. An argument set to None will use the perf_analyzer's default. """ perf_analyzer_args = [ "async", "sync", "measurement-interval", "measurement-mode", "measurement-request-count", "concurrency-range", "request-rate-range", "request-distribution", "request-intervals", "binary-search", "num-of-sequence", "latency-threshold", "max-threads", "stability-percentage", "max-trials", "percentile", "input-data", "shared-memory", "output-shared-memory-size", "sequence-length", "string-length", "string-data", ] perf_analyzer_multiple_args = [ "shape", ] input_to_options = [ "model-name", "model-version", "batch-size", "url", "protocol", "latency-report-file", "streaming", ] input_to_verbose = ["verbose", "extra-verbose"] def __init__(self): """ Construct a PerfAnalyzerConfig """ self._args = {k: None for k in self.perf_analyzer_args} self._multiple_args = {k: [] for k in self.perf_analyzer_multiple_args} self._options = { "-m": None, "-x": None, "-b": None, "-u": None, "-i": None, "-f": None, "-H": None, "-c": None, "-t": None, } self._verbose = {"-v": None, "-v -v": None} self._input_to_options = { "model-name": "-m", "model-version": "-x", "batch-size": "-b", "url": "-u", "protocol": "-i", "latency-report-file": "-f", "streaming": "-H", "concurrency": "-c", "threads": "-t", } self._input_to_verbose = {"verbose": "-v", "extra-verbose": "-v -v"} @classmethod def allowed_keys(cls): """ Returns ------- list of str The keys that are allowed to be passed into perf_analyzer """ return ( list(cls.perf_analyzer_args) + list(cls.perf_analyzer_multiple_args) + list(cls.input_to_options) + list(cls.input_to_verbose) ) def update_config(self, params=None): """ Allows setting values from a params dict Parameters ---------- params: dict keys are allowed args to perf_analyzer """ if params: for key in params: self[key] = params[key] def to_cli_string(self): """ Utility function to convert a config into a string of arguments to the perf_analyzer with CLI. Returns ------- str cli command string consisting of all arguments to the perf_analyzer set in the config, without the executable name. 
""" # single dashed options, then verbose flags, then main args args = [f"{k} {v}" for k, v in self._options.items() if v] args += [k for k, v in self._verbose.items() if v] args += [f"--{k}={v}" for k, v in self._args.items() if v] for k, v in self._multiple_args.items(): for item in v: args.append(f"--{k}={item}") return " ".join(args) def __getitem__(self, key: str): """ Gets an arguments value in config Parameters ---------- key : str The name of the argument to the perf_analyzer Returns ------- The value that the argument is set to in this config Raises ------ TritonModelAnalyzerException If argument not found in the config """ if key in self._args: return self._args[key] elif key in self._multiple_args: return self._multiple_args[key] elif key in self._input_to_options: return self._options[self._input_to_options[key]] elif key in self._input_to_verbose: return self._verbose[self._input_to_verbose[key]] else: raise PerfAnalyzerException(f"'{key}' Key not found in config") def __setitem__(self, key: str, value: Any): """ Sets an arguments value in config after checking if defined/supported. Parameters ---------- key : str The name of the argument to the perf_analyzer value : (any) The value to which the argument is being set Raises ------ TritonModelAnalyzerException If key is unsupported or undefined in the config class """ if key in self._args: self._args[key] = value elif key in self._multiple_args: self._multiple_args[key].append(value) elif key in self._input_to_options: self._options[self._input_to_options[key]] = value elif key in self._input_to_verbose: self._verbose[self._input_to_verbose[key]] = value else: raise PerfAnalyzerException( f"The argument '{key}' to the perf_analyzer " "is not supported by the model analyzer." )
PaddlePaddle/LanguageModeling/BERT/utils
utils
utility
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import random import numpy as np import paddle def get_num_trainers(): """Get number of trainers in distributed training.""" num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1)) return num_trainers def get_trainer_id(): """Get index of trainer in distributed training.""" trainer_id = int(os.environ.get('PADDLE_TRAINER_ID', 0)) return trainer_id def is_integer(number): """Whether a number is integer.""" if sys.version > '3': return isinstance(number, int) return isinstance(number, (int, long)) def set_seed(seed): """Set random seed.""" random.seed(seed) np.random.seed(seed) paddle.seed(seed)
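A tiny illustrative use of the helpers above inside a training entry point; the PADDLE_TRAINER_ID / PADDLE_TRAINERS_NUM environment variables are assumed to be set by the Paddle distributed launcher, and the base seed is arbitrary.

# Seed each trainer differently so per-rank randomness is decorrelated.
set_seed(42 + get_trainer_id())
print(f'trainer {get_trainer_id()} of {get_num_trainers()}')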
TensorFlow2/LanguageModeling/BERT
BERT
input_pipeline
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """BERT model input pipelines.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf import horovod.tensorflow as hvd def decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.io.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.cast(t, tf.int32) example[name] = t return example def single_file_dataset(input_file, name_to_features, use_horovod=False): """Creates a single-file dataset to be passed for BERT custom training.""" # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. d = tf.data.TFRecordDataset(input_file) if use_horovod: d = d.shard(hvd.size(), hvd.rank()) d = d.map(lambda record: decode_record(record, name_to_features)) # When `input_file` is a path to a single file or a list # containing a single path, disable auto sharding so that # same input file is sent to all workers. if isinstance(input_file, str) or len(input_file) == 1: options = tf.data.Options() options.experimental_distribute.auto_shard_policy = ( tf.data.experimental.AutoShardPolicy.OFF) d = d.with_options(options) return d def create_pretrain_dataset(input_patterns, seq_length, max_predictions_per_seq, batch_size, is_training=True, input_pipeline_context=None, use_horovod=False): """Creates input dataset from (tf)records files for pretraining.""" name_to_features = { 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64), 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64), 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64), 'masked_lm_positions': tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64), 'masked_lm_ids': tf.io.FixedLenFeature([max_predictions_per_seq], tf.int64), 'masked_lm_weights': tf.io.FixedLenFeature([max_predictions_per_seq], tf.float32), 'next_sentence_labels': tf.io.FixedLenFeature([1], tf.int64), } dataset = tf.data.Dataset.list_files(input_patterns, shuffle=is_training) if use_horovod: dataset = dataset.shard(hvd.size(), hvd.rank()) if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: dataset = dataset.shard(input_pipeline_context.num_input_pipelines, input_pipeline_context.input_pipeline_id) dataset = dataset.repeat() # We set shuffle buffer to exactly match total number of # training files to ensure that training data is well shuffled. input_files = [] for input_pattern in input_patterns: input_files.extend(tf.io.gfile.glob(input_pattern)) dataset = dataset.shuffle(len(input_files)) # In parallel, create tf record dataset for each train files. 
# cycle_length = 8 means that up to 8 files will be read and deserialized in # parallel. You may want to increase this number if you have a large number of # CPU cores. dataset = dataset.interleave( tf.data.TFRecordDataset, cycle_length=8, num_parallel_calls=tf.data.experimental.AUTOTUNE) decode_fn = lambda record: decode_record(record, name_to_features) dataset = dataset.map( decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE) def _select_data_from_record(record): """Filter out features to use for pretraining.""" x = { 'input_word_ids': record['input_ids'], 'input_mask': record['input_mask'], 'input_type_ids': record['segment_ids'], 'masked_lm_positions': record['masked_lm_positions'], 'masked_lm_ids': record['masked_lm_ids'], 'masked_lm_weights': record['masked_lm_weights'], 'next_sentence_labels': record['next_sentence_labels'], } y = record['masked_lm_weights'] return (x, y) dataset = dataset.map( _select_data_from_record, num_parallel_calls=tf.data.experimental.AUTOTUNE) if is_training: dataset = dataset.shuffle(100) dataset = dataset.batch(batch_size, drop_remainder=True) dataset = dataset.prefetch(1024) return dataset def create_classifier_dataset(file_path, seq_length, batch_size, is_training=True, input_pipeline_context=None, use_horovod=False): """Creates input dataset from (tf)records files for train/eval.""" name_to_features = { 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64), 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64), 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64), 'label_ids': tf.io.FixedLenFeature([], tf.int64), 'is_real_example': tf.io.FixedLenFeature([], tf.int64), } dataset = single_file_dataset(file_path, name_to_features, use_horovod) # The dataset is always sharded by number of hosts. # num_input_pipelines is the number of hosts rather than number of cores. if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: dataset = dataset.shard(input_pipeline_context.num_input_pipelines, input_pipeline_context.input_pipeline_id) def _select_data_from_record(record): x = { 'input_word_ids': record['input_ids'], 'input_mask': record['input_mask'], 'input_type_ids': record['segment_ids'] } y = record['label_ids'] return (x, y) dataset = dataset.map(_select_data_from_record) if is_training: dataset = dataset.shuffle(100) dataset = dataset.repeat() dataset = dataset.batch(batch_size, drop_remainder=is_training) dataset = dataset.prefetch(1024) return dataset def create_squad_dataset(file_path, seq_length, batch_size, is_training=True, input_pipeline_context=None, use_horovod=False): """Creates input dataset from (tf)records files for train/eval.""" name_to_features = { 'input_ids': tf.io.FixedLenFeature([seq_length], tf.int64), 'input_mask': tf.io.FixedLenFeature([seq_length], tf.int64), 'segment_ids': tf.io.FixedLenFeature([seq_length], tf.int64), } if is_training: name_to_features['start_positions'] = tf.io.FixedLenFeature([], tf.int64) name_to_features['end_positions'] = tf.io.FixedLenFeature([], tf.int64) else: name_to_features['unique_ids'] = tf.io.FixedLenFeature([], tf.int64) dataset = single_file_dataset(file_path, name_to_features, use_horovod) # The dataset is always sharded by number of hosts. # num_input_pipelines is the number of hosts rather than number of cores. 
if input_pipeline_context and input_pipeline_context.num_input_pipelines > 1: dataset = dataset.shard(input_pipeline_context.num_input_pipelines, input_pipeline_context.input_pipeline_id) def _select_data_from_record(record): """Dispatches record to features and labels.""" x, y = {}, {} for name, tensor in record.items(): if name in ('start_positions', 'end_positions'): y[name] = tensor elif name == 'input_ids': x['input_word_ids'] = tensor elif name == 'segment_ids': x['input_type_ids'] = tensor else: x[name] = tensor return (x, y) dataset = dataset.map(_select_data_from_record) if is_training: dataset = dataset.shuffle(100) dataset = dataset.repeat() dataset = dataset.batch(batch_size, drop_remainder=True) dataset = dataset.prefetch(1024) return dataset
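An illustrative call to one of the dataset builders above; the TFRecord path is a hypothetical placeholder, and Horovod only needs to be initialized when use_horovod=True.

dataset = create_classifier_dataset(
    file_path='/data/glue/mrpc_train.tf_record',   # hypothetical TFRecord file
    seq_length=128,
    batch_size=32,
    is_training=True,
    use_horovod=False)
for features, labels in dataset.take(1):
    print(features['input_word_ids'].shape, labels.shape)   # (32, 128) (32,)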
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc
csrc
box_iou
/** * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include "cuda/vision.h" #ifndef _box_iou_h_ #define _box_iou_h_ at::Tensor box_iou(at::Tensor box1, at::Tensor box2){ at::Tensor result = box_iou_cuda(box1, box2); return result; } #endif
TensorFlow/Classification/ConvNets/model
model
__init__
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from model import layers from model import blocks from model import resnet
TensorFlow/Detection/SSD/models/research/object_detection
object_detection
CONTRIBUTING
# Contributing to the Tensorflow Object Detection API Patches to Tensorflow Object Detection API are welcome! We require contributors to fill out either the individual or corporate Contributor License Agreement (CLA). * If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html). * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html). Please follow the [Tensorflow contributing guidelines](https://github.com/tensorflow/tensorflow/blob/master/CONTRIBUTING.md) when submitting pull requests.
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt
trt
CMakeLists
## # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # file(GLOB TACOTRON2WAVEGLOW_SOURCES speechSynthesizer.cpp denoiser/*.cpp layers/*.cpp tacotron2/*.cpp tacotron2/*.cu util/*.cpp util/*.cu waveglow/*.cpp waveglow/*.cu plugins/taco2AttentionPlugin/*.cpp plugins/taco2AttentionPlugin/*.cu plugins/taco2DenoiseTransformPlugin/*.cpp plugins/taco2DenoiseTransformPlugin/*.cu plugins/taco2LSTMCellPlugin/*.cpp plugins/taco2LSTMCellPlugin/*.cu plugins/taco2ModulationRemovalPlugin/*.cpp plugins/taco2ModulationRemovalPlugin/*.cu plugins/taco2PrenetPlugin/*.cpp plugins/taco2PrenetPlugin/*.cu plugins/taco2ProjectionPlugin/*.cpp plugins/taco2ProjectionPlugin/*.cu ) set(TARGET_NAME "tt2i") ## library add_library(${TARGET_NAME} ${TACOTRON2WAVEGLOW_SOURCES}) target_link_libraries(${TARGET_NAME} cublas nvinfer nvonnxparser ) set_property(TARGET ${TARGET_NAME} PROPERTY CUDA_RESOLVE_DEVICE_SYMBOLS ON) set_property(TARGET ${TARGET_NAME} PROPERTY ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib) include_directories( ./ ./denoiser/ ./layers/ ./tacotron2/ ./util/ ./waveglow/ ./common/ ./plugins/taco2AttentionPlugin/ ./plugins/taco2DenoiseTransformPlugin/ ./plugins/taco2LSTMCellPlugin/ ./plugins/taco2ModulationRemovalPlugin/ ./plugins/taco2PrenetPlugin/ ./plugins/taco2ProjectionPlugin/ )
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/transforms
transforms
__init__
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from .transforms import Compose from .transforms import Resize from .transforms import RandomHorizontalFlip from .transforms import ToTensor from .transforms import Normalize from .build import build_transforms
PyTorch/Forecasting/TFT/scripts
scripts
run_traffic_DGX1-16G
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. : ${SEED:=1} : ${LR:=1e-3} : ${NGPU:=8} : ${BATCH_SIZE:=1024} : ${EPOCHS:=20} python -m torch.distributed.run --nproc_per_node=${NGPU} train.py \ --dataset traffic \ --data_path /data/processed/traffic_bin \ --batch_size=${BATCH_SIZE} \ --sample 450000 50000 \ --lr ${LR} \ --epochs ${EPOCHS} \ --seed ${SEED} \ --use_amp \ --results /results/TFT_traffic_bs${NGPU}x${BATCH_SIZE}_lr${LR}/seed_${SEED}
PyTorch/Translation/Transformer/fairseq/data
data
language_pair_dataset
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. # #------------------------------------------------------------------------- # # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from torch.utils.data import Dataset, ConcatDataset from . import data_utils import itertools import os import sys from fairseq.data import IndexedInMemoryDataset, IndexedRawTextDataset class LanguagePairDataset(Dataset): """A pair of torch.utils.data.Datasets.""" def __init__( self, src, src_sizes, src_dict, tgt=None, tgt_sizes=None, tgt_dict=None, left_pad_source=True, left_pad_target=False, max_source_positions=1024, max_target_positions=1024, pad_sequence=1, shuffle=True, ): if tgt_dict is not None: assert src_dict.pad() == tgt_dict.pad() assert src_dict.eos() == tgt_dict.eos() assert src_dict.unk() == tgt_dict.unk() self.src = src self.tgt = tgt self.src_sizes = np.array(src_sizes) self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None self.src_dict = src_dict self.tgt_dict = tgt_dict self.left_pad_source = left_pad_source self.left_pad_target = left_pad_target self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.pad_sequence = pad_sequence self.shuffle = shuffle print("| Sentences are being padded to multiples of: {}".format(self.pad_sequence), file=sys.stderr) def __getitem__(self, index): return { 'id': index, 'source': self.src[index], 'target': self.tgt[index] if self.tgt is not None else None, } def __len__(self): return len(self.src) def collater(self, samples): """Merge a list of samples to form a mini-batch.""" return data_utils.collate( samples, pad_idx=self.src_dict.pad(), eos_idx=self.src_dict.eos(), left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target, pad_sequence=self.pad_sequence, ) def num_tokens(self, index): """Return an example's length (number of tokens), used for batching.""" orig_size = max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0) assert self.pad_sequence > 0, "Padding multiple has to be greater than 0" size = 0 if self.pad_sequence > 1: size = orig_size // self.pad_sequence * self.pad_sequence if orig_size % self.pad_sequence > 0: size += self.pad_sequence else: size = orig_size return size #return max(self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0) def ordered_indices(self, seed=None, epoch=1): """Ordered indices for batching.""" if self.shuffle: indices = np.random.RandomState(seed + epoch).permutation(len(self)) else: indices = np.arange(len(self)) if self.tgt_sizes is not None: indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')] return indices[np.argsort(self.src_sizes[indices], kind='mergesort')] def 
valid_size(self, index, max_positions): """Check if an example's size is valid according to max_positions.""" max_source_positions, max_target_positions = self._get_max_positions(max_positions) return ( self.src_sizes[index] <= max_source_positions and (self.tgt_sizes is None or self.tgt_sizes[index] <= max_target_positions) ) def _get_max_positions(self, max_positions): if max_positions is None: return self.max_source_positions, self.max_target_positions assert len(max_positions) == 2 max_src_pos, max_tgt_pos = max_positions return min(self.max_source_positions, max_src_pos), min(self.max_target_positions, max_tgt_pos) def load_dataset(args, datasets, split, src_dict, tgt_dict, combine=False): """Load a dataset split.""" def split_exists(split, src, tgt, lang): filename = os.path.join(args.data, '{}.{}-{}.{}'.format(split, src, tgt, lang)) if args.raw_text and IndexedRawTextDataset.exists(filename): return True elif not args.raw_text and IndexedInMemoryDataset.exists(filename): return True return False def indexed_dataset(path, dictionary): if args.raw_text: return IndexedRawTextDataset(path, dictionary) elif IndexedInMemoryDataset.exists(path): return IndexedInMemoryDataset(path, fix_lua_indexing=True) return None src_datasets = [] tgt_datasets = [] for k in itertools.count(): split_k = split + (str(k) if k > 0 else '') # infer langcode src, tgt = args.source_lang, args.target_lang if split_exists(split_k, src, tgt, src): prefix = os.path.join(args.data, '{}.{}-{}.'.format(split_k, src, tgt)) elif split_exists(split_k, tgt, src, src): prefix = os.path.join(args.data, '{}.{}-{}.'.format(split_k, tgt, src)) else: if k > 0: break else: raise FileNotFoundError('Dataset not found: {} ({})'.format(split, args.data)) src_datasets.append(indexed_dataset(prefix + src, src_dict)) tgt_datasets.append(indexed_dataset(prefix + tgt, tgt_dict)) print('| {} {} {} examples'.format(args.data, split_k, len(src_datasets[-1]))) if not combine: break assert len(src_datasets) == len(tgt_datasets) if len(src_datasets) == 1: src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0] src_sizes = src_dataset.sizes tgt_sizes = tgt_dataset.sizes else: src_dataset = ConcatDataset(src_datasets) tgt_dataset = ConcatDataset(tgt_datasets) src_sizes = np.concatenate([ds.sizes for ds in src_datasets]) tgt_sizes = np.concatenate([ds.sizes for ds in tgt_datasets]) datasets[split] = LanguagePairDataset( src_dataset, src_sizes, src_dict, tgt_dataset, tgt_sizes, tgt_dict, left_pad_source=args.left_pad_source, left_pad_target=args.left_pad_target, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, pad_sequence=args.pad_sequence, ) def load_dataset_splits(args, splits, src_dict, tgt_dict): datasets = {} for split in splits: if split == 'train': load_dataset(args, datasets, split, src_dict, tgt_dict, combine=True) else: for k in itertools.count(): split_k = split + (str(k) if k > 0 else '') try: load_dataset(args, datasets, split_k, src_dict, tgt_dict, combine=False) except FileNotFoundError as e: if k > 0: break raise e return datasets
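To make the padding-multiple logic in num_tokens() above concrete, here is a small standalone sketch of the same rounding rule; the sizes are arbitrary example values.

def round_up_to_multiple(orig_size, pad_sequence):
    # Mirrors LanguagePairDataset.num_tokens(): the longer of the source/target
    # lengths is rounded up to the nearest multiple of pad_sequence.
    if pad_sequence <= 1:
        return orig_size
    size = orig_size // pad_sequence * pad_sequence
    if orig_size % pad_sequence > 0:
        size += pad_sequence
    return size

print(round_up_to_multiple(13, 8))   # 16
print(round_up_to_multiple(16, 8))   # 16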
PyTorch/SpeechSynthesis/HiFiGAN/fastpitch
fastpitch
transformer_jit
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional import torch import torch.nn as nn import torch.nn.functional as F from common.utils import mask_from_lens class PositionalEmbedding(nn.Module): def __init__(self, demb): super(PositionalEmbedding, self).__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz: Optional[int] = None): sinusoid_inp = torch.ger(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1) if bsz is not None: return pos_emb[None, :, :].expand(bsz, -1, -1) else: return pos_emb[None, :, :] class PositionwiseFF(nn.Module): def __init__(self, d_model, d_inner, dropout, pre_lnorm=False): super(PositionwiseFF, self).__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential( nn.Linear(d_model, d_inner), nn.ReLU(), nn.Dropout(dropout), nn.Linear(d_inner, d_model), nn.Dropout(dropout), ) self.layer_norm = nn.LayerNorm(d_model) self.pre_lnorm = pre_lnorm def forward(self, inp): if self.pre_lnorm: # layer normalization + positionwise feed-forward core_out = self.CoreNet(self.layer_norm(inp)) # residual connection output = core_out + inp else: # positionwise feed-forward core_out = self.CoreNet(inp) # residual connection + layer normalization output = self.layer_norm(inp + core_out) return output class PositionwiseConvFF(nn.Module): def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False): super(PositionwiseConvFF, self).__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential( nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)), nn.ReLU(), # nn.Dropout(dropout), # worse convergence nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)), nn.Dropout(dropout), ) self.layer_norm = nn.LayerNorm(d_model) self.pre_lnorm = pre_lnorm def forward(self, inp): if self.pre_lnorm: # layer normalization + positionwise feed-forward core_out = inp.transpose(1, 2) core_out = self.CoreNet(self.layer_norm(core_out)) core_out = core_out.transpose(1, 2) # residual connection output = core_out + inp else: # positionwise feed-forward core_out = inp.transpose(1, 2) core_out = self.CoreNet(core_out) core_out = core_out.transpose(1, 2) # residual connection + layer normalization output = self.layer_norm(inp + core_out) return output class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1, pre_lnorm=False): super(MultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.scale = 1 / (d_head ** 0.5) self.dropout = dropout self.pre_lnorm = pre_lnorm self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = 
nn.LayerNorm(d_model) def forward(self, inp, attn_mask: Optional[torch.Tensor] = None): residual = inp if self.pre_lnorm: # layer normalization inp = self.layer_norm(inp) n_head, d_head = self.n_head, self.d_head head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=-1) head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head) head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head) head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head) q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head) attn_score = torch.bmm(q, k.transpose(1, 2)) attn_score.mul_(self.scale) if attn_mask is not None: attn_mask = attn_mask.unsqueeze(1) attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1) attn_score.masked_fill_(attn_mask, -float('inf')) attn_prob = F.softmax(attn_score, dim=2) attn_prob = self.dropatt(attn_prob) attn_vec = torch.bmm(attn_prob, v) attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head) attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view( inp.size(0), inp.size(1), n_head * d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = residual + attn_out else: # residual connection + layer normalization # XXX Running TorchScript on 20.02 and 20.03 containers crashes here # XXX Works well with 20.01-py3 container. # XXX dirty fix is: # XXX output = self.layer_norm(residual + attn_out).half() output = self.layer_norm(residual + attn_out) return output class TransformerLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout, **kwargs): super(TransformerLayer, self).__init__() self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, mask): output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2)) output *= mask output = self.pos_ff(output) output *= mask return output class FFTransformer(nn.Module): def __init__(self, n_layer, n_head, d_model, d_head, d_inner, kernel_size, dropout, dropatt, dropemb=0.0, embed_input=True, n_embed=None, d_embed=None, padding_idx=0, pre_lnorm=False): super(FFTransformer, self).__init__() self.d_model = d_model self.n_head = n_head self.d_head = d_head self.padding_idx = padding_idx self.n_embed = n_embed self.embed_input = embed_input if embed_input: self.word_emb = nn.Embedding(n_embed, d_embed or d_model, padding_idx=self.padding_idx) else: self.word_emb = nn.Identity() self.pos_emb = PositionalEmbedding(self.d_model) self.drop = nn.Dropout(dropemb) self.layers = nn.ModuleList() for _ in range(n_layer): self.layers.append( TransformerLayer( n_head, d_model, d_head, d_inner, kernel_size, dropout, dropatt=dropatt, pre_lnorm=pre_lnorm) ) def forward(self, dec_inp, seq_lens: Optional[torch.Tensor] = None, conditioning: Optional[torch.Tensor] = None): if not self.embed_input: inp = dec_inp assert seq_lens is not None mask = mask_from_lens(seq_lens).unsqueeze(2) else: inp = self.word_emb(dec_inp) # [bsz x L x 1] mask = (dec_inp != self.padding_idx).unsqueeze(2) pos_seq = torch.arange(inp.size(1), device=inp.device, dtype=inp.dtype) pos_emb = self.pos_emb(pos_seq) * mask if conditioning is not None: out = self.drop(inp + pos_emb + conditioning) else: out = self.drop(inp + pos_emb) for layer in self.layers: out = 
layer(out, mask=mask) # out = self.drop(out) return out, mask
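For orientation, the sinusoidal table built by PositionalEmbedding above can be reproduced standalone with plain torch ops; the dimension and sequence length below are arbitrary example values, not values used by FastPitch.

# Illustrative sketch (not part of the module): same math as PositionalEmbedding.forward.
import torch

demb = 8                                              # example embedding dimension
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
pos_seq = torch.arange(6, dtype=torch.float)          # positions 0..5
sinusoid_inp = torch.ger(pos_seq, inv_freq)           # outer product, shape [6, demb // 2]
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)
print(pos_emb.shape)                                  # torch.Size([6, 8])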
TensorFlow2/Classification/ConvNets/scripts
scripts
dali_index
#!/bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

SRC_DIR=${1}
DST_DIR=${2}

echo "Creating training file indexes"

mkdir -p ${DST_DIR}

for file in ${SRC_DIR}/train-*; do
    BASENAME=$(basename $file)
    DST_NAME=$DST_DIR/$BASENAME
    echo "Creating index $DST_NAME for $file"
    tfrecord2idx $file $DST_NAME
done

echo "Creating validation file indexes"

for file in ${SRC_DIR}/validation-*; do
    BASENAME=$(basename $file)
    DST_NAME=$DST_DIR/$BASENAME
    echo "Creating index $DST_NAME for $file"
    tfrecord2idx $file $DST_NAME
done
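A rough Python equivalent of the shell script above, shown only for illustration; like the script itself, it assumes the tfrecord2idx utility shipped with DALI is available on PATH.

# Illustrative sketch: build DALI index files for train/validation TFRecord shards.
import subprocess
from pathlib import Path

def create_indexes(src_dir, dst_dir):
    dst = Path(dst_dir)
    dst.mkdir(parents=True, exist_ok=True)
    for pattern in ("train-*", "validation-*"):
        for tfrecord in sorted(Path(src_dir).glob(pattern)):
            index_file = dst / tfrecord.name
            print(f"Creating index {index_file} for {tfrecord}")
            subprocess.run(["tfrecord2idx", str(tfrecord), str(index_file)], check=True)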
TensorFlow2/Segmentation/UNet_Medical/runtime
runtime
setup
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import multiprocessing import numpy as np import tensorflow as tf import horovod.tensorflow as hvd import dllogger as logger from dllogger import StdOutBackend, Verbosity, JSONStreamBackend def set_flags(params): os.environ['CUDA_CACHE_DISABLE'] = '1' os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL' os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private' os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '0' os.environ['TF_ADJUST_HUE_FUSED'] = '1' os.environ['TF_ADJUST_SATURATION_FUSED'] = '1' os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' os.environ['TF_SYNC_ON_FINISH'] = '0' os.environ['TF_AUTOTUNE_THRESHOLD'] = '2' os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0' np.random.seed(params.seed) tf.random.set_seed(params.seed) if params.use_xla: tf.config.optimizer.set_jit(True) gpus = tf.config.experimental.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU') tf.config.optimizer.set_experimental_options({'remapping': False}) tf.config.threading.set_intra_op_parallelism_threads(1) tf.config.threading.set_inter_op_parallelism_threads(max(2, (multiprocessing.cpu_count() // hvd.size()) - 2)) if params.use_amp: tf.keras.mixed_precision.experimental.set_policy('mixed_float16') def prepare_model_dir(params): # model_dir = os.path.join(params.model_dir, "model_checkpoint") model_dir = params.model_dir if (hvd.rank() == 0 and not params.benchmark) else None if model_dir is not None: os.makedirs(model_dir, exist_ok=True) if ('train' in params.exec_mode) and (not params.resume_training): os.system('rm -rf {}/*'.format(model_dir)) return model_dir def get_logger(params): backends = [] if hvd.rank() == 0: backends += [StdOutBackend(Verbosity.VERBOSE)] if params.log_dir: backends += [JSONStreamBackend(Verbosity.VERBOSE, params.log_dir)] logger.init(backends=backends) logger.metadata("eval_dice_score", {"unit": None}) logger.metadata("throughput_test", {"unit": "images/s"}) logger.metadata("throughput_train", {"unit": "images/s"}) return logger
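A hedged usage sketch for the helpers above. The params fields below mirror only the attributes that set_flags, prepare_model_dir and get_logger actually read; the model's real command-line parser may expose more options.

# Illustrative usage (assumed field names taken from the code above).
from types import SimpleNamespace
import horovod.tensorflow as hvd

hvd.init()
params = SimpleNamespace(
    seed=42, use_xla=True, use_amp=False,          # read by set_flags
    model_dir="/results", benchmark=False,         # read by prepare_model_dir
    exec_mode="train", resume_training=False,
    log_dir="/results/log.json",                   # read by get_logger
)
set_flags(params)
model_dir = prepare_model_dir(params)
logger = get_logger(params)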
TensorFlow2/Recommendation/SIM
SIM
main
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import random from pathlib import Path import click import dllogger import horovod.tensorflow as hvd import numpy as np import tensorflow as tf import tensorflow_addons as tfa from sim.data.dataloader import get_dataloader_tfrecord from sim.data.defaults import FILES_SELECTOR, TEST_MAPPING, TRAIN_MAPPING from sim.data.feature_spec import FeatureSpec from sim.models.dien_model import DIENModel from sim.models.din_model import DINModel from sim.models.sim_model import SIMModel from sim.utils.benchmark import PerformanceCalculator from sim.utils.gpu_affinity import set_affinity from sim.utils.losses import build_sim_loss_fn, dien_auxiliary_loss_fn from sim.utils.misc import csv_str_to_int_list, dist_print def init_checkpoint_manager(model, optimizer, save_checkpoint_path, load_checkpoint_path): checkpoint = tf.train.Checkpoint( model=model, optimizer=optimizer, epoch=tf.Variable(-1, name='epoch') ) checkpoint_manager = tf.train.CheckpointManager( checkpoint=checkpoint, directory=save_checkpoint_path, max_to_keep=1, ) if load_checkpoint_path != "": _maybe_restore_checkpoint( checkpoint=checkpoint, checkpoint_path=load_checkpoint_path ) return checkpoint_manager def _maybe_restore_checkpoint(checkpoint, checkpoint_path): # Needed here to support different save and load checkpoint paths checkpoint_manager = tf.train.CheckpointManager( checkpoint=checkpoint, directory=checkpoint_path, max_to_keep=1, ) checkpoint.restore(checkpoint_manager.latest_checkpoint).expect_partial() if checkpoint_manager.latest_checkpoint: dist_print(f"Model restored from checkpoint {checkpoint_path}") else: dist_print(f"Failed to restore model from checkpoint {checkpoint_path}") def init_logger(results_dir, filename): if hvd.rank() == 0: os.makedirs(results_dir, exist_ok=True) log_path = os.path.join(results_dir, filename) dllogger.init( backends=[ dllogger.JSONStreamBackend( verbosity=dllogger.Verbosity.VERBOSE, filename=log_path ), dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE), ] ) dllogger.metadata("test_auc", {"unit": None}) dllogger.metadata("latency_p90", {"unit": "ms"}) dllogger.metadata("train_loss", {"unit": None}) dllogger.metadata("time_to_train", {"unit": "s"}) dllogger.metadata("throughput", {"unit": "samples/s"}) else: dllogger.init(backends=[]) # In the future, select one of available dataloaders there (tfrecord, csv, etc...) 
def get_data_iterator(paths, feature_spec, batch_size, num_gpus, long_seq_length, prefetch_size, num_parallel_calls=None, repeat_count=0, drop_remainder=False, amp=False, disable_cache=False, prebatch_size=0): return get_dataloader_tfrecord( paths, feature_spec=feature_spec, batch_size=batch_size, long_seq_length=long_seq_length, num_gpus=num_gpus, id=hvd.rank(), drop_remainder=drop_remainder, repeat_count=repeat_count, disable_cache=disable_cache, prefetch_buffer_size=prefetch_size, num_parallel_calls=num_parallel_calls, prebatch_size=prebatch_size ) def build_model_and_loss(model_params): model_type = model_params["model_type"] if model_type == "sim": model = SIMModel( model_params['feature_spec'], mlp_hidden_dims=model_params["mlp_hidden_dims"], embedding_dim=model_params["embedding_dim"], dropout_rate=model_params["dropout_rate"] ) classification_loss_fn = build_sim_loss_fn() @tf.function def model_fn(batch, training=True): input_data, targets = batch # take the mask for N-1 timesteps from prepared input data mask_for_aux_loss = input_data["short_sequence_mask"][:, 1:] # model forward pass output_dict = model(input_data, training=training) # compute loss classification_loss = classification_loss_fn( targets, output_dict["stage_one_logits"], output_dict["stage_two_logits"] ) dien_aux_loss = dien_auxiliary_loss_fn( output_dict["aux_click_probs"], output_dict["aux_noclick_probs"], mask=mask_for_aux_loss, ) total_loss = classification_loss + dien_aux_loss logits = output_dict["stage_two_logits"] loss_dict = { "total_loss": total_loss, "classification_loss": classification_loss, "dien_aux_loss": dien_aux_loss } return (targets, logits), loss_dict elif model_type == "dien": model = DIENModel( model_params['feature_spec'], mlp_hidden_dims={ "classifier": model_params["mlp_hidden_dims"]["stage_2"], "aux": model_params["mlp_hidden_dims"]["aux"], }, embedding_dim=model_params["embedding_dim"], ) classification_loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True) @tf.function def model_fn(batch, training=True): input_data, targets = batch # take the mask for N-1 timesteps from prepared input data mask_for_aux_loss = input_data["short_sequence_mask"][:, 1:] # model forward pass output_dict = model(input_data, training=training) # compute loss classification_loss = classification_loss_fn(targets, output_dict["logits"]) dien_aux_loss = dien_auxiliary_loss_fn( output_dict["aux_click_probs"], output_dict["aux_noclick_probs"], mask=mask_for_aux_loss, ) total_loss = classification_loss + dien_aux_loss logits = output_dict["logits"] loss_dict = { "total_loss": total_loss, "classification_loss": classification_loss, "dien_aux_loss": dien_aux_loss } return (targets, logits), loss_dict elif model_type == "din": model = DINModel( model_params['feature_spec'], mlp_hidden_dims=model_params["mlp_hidden_dims"]["stage_2"], embedding_dim=model_params["embedding_dim"] ) classification_loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True) @tf.function def model_fn(batch, training=True): input_data, targets = batch # model forward pass output_dict = model(input_data, training=training) # compute loss total_loss = classification_loss_fn( targets, output_dict["logits"] ) logits = output_dict["logits"] loss_dict = {"total_loss": total_loss} return (targets, logits), loss_dict return model, model_fn @tf.function def _update_auc(auc_accumulator, targets, logits): auc_accumulator.update_state(targets, logits) def eval(model_fn, data_iterator, num_thresholds=8000, prefix=""): auc_accumulator = 
tf.keras.metrics.AUC( num_thresholds=num_thresholds, name="auc_accumulator", from_logits=True ) distributed = hvd.size() != 1 local_logits = [] local_targets = [] local_total_losses = [] for batch in data_iterator: (targets, logits), loss_dict = model_fn(batch, training=False) local_logits.append(logits) local_targets.append(targets) local_total_losses.append(loss_dict["total_loss"]) locals = [local_logits, local_targets, local_total_losses] for i, local in enumerate(locals): # wrap empty lists in tensor to allow tf.concat if len(local) == 0: local = tf.constant(local) # concat all local variables into a single tensor if local is local_total_losses: local = tf.stack(local, 0) else: local = tf.concat(local, 0) # for single element lists, tf.concat will produce shape=() instead of shape=(1,). # reshape it for hvd.allgather to work if len(local.shape) == 0: local = tf.reshape(local, -1) locals[i] = local logits, targets, total_losses = locals if distributed: # gather from all nodes logits = hvd.allgather(logits) targets = hvd.allgather(targets) total_losses = hvd.allgather(total_losses) if hvd.rank() == 0: # need to convert it to a dataset first split_batch_size = local_logits[0].shape[0] metrics_ds = tf.data.Dataset.from_tensor_slices((targets, logits)).batch(split_batch_size) # run batched version of metrics update for targets, logits in metrics_ds: _update_auc(auc_accumulator, targets, logits) loss = tf.reduce_mean(total_losses).numpy().item() auc = auc_accumulator.result().numpy().item() else: loss = 0. auc = 0. return {f"{prefix}auc": auc, f"{prefix}loss": loss} @tf.function def model_step(batch, model, model_fn, optimizer, amp, first_batch): with tf.GradientTape() as tape: _, loss_dict = model_fn(batch, training=True) loss = loss_dict["total_loss"] scaled_loss = optimizer.get_scaled_loss(loss) if amp else loss tape = hvd.DistributedGradientTape(tape, sparse_as_dense=True, compression=hvd.Compression.fp16) grads = tape.gradient(scaled_loss, model.trainable_variables) grads = optimizer.get_unscaled_gradients(grads) if amp else grads optimizer.apply_gradients(zip(grads, model.trainable_variables)) if first_batch: hvd.broadcast_variables(model.variables, root_rank=0) hvd.broadcast_variables(optimizer.variables(), root_rank=0) return loss_dict def run_single_epoch(model, model_fn, data_iterator, optimizer, amp, start_epoch, epoch, benchmark, performance_calculator): for current_step, batch in enumerate(data_iterator): if benchmark and performance_calculator.completed: break is_first_batch = (current_step == 0 and epoch == 0) step_dict = model_step(batch, model, model_fn, optimizer, amp, is_first_batch) step_dict = {key: val.numpy().item() for key, val in step_dict.items()} n_samples = len(batch[1]) step_throughput = performance_calculator(n_samples) step_dict["samples/s"] = step_throughput dllogger.log(data=step_dict, step=(start_epoch + epoch, current_step)) def train(model, model_fn, data_iterator_train, data_iterator_test, optimizer, amp, epochs, benchmark, performance_calculator, save_checkpoint, checkpoint_manager): """Train and evaluate the model for a given number of epochs.""" performance_calculator.init() all_epochs_results = [] start_epoch = checkpoint_manager.checkpoint.epoch.numpy().item() + 1 for epoch in range(epochs - start_epoch): run_single_epoch(model, model_fn, data_iterator_train, optimizer, amp, start_epoch, epoch, benchmark, performance_calculator) if not benchmark: # we dump throughput results for consecutive epochs for a regular training job (w/o --benchmark flag) 
results_data = performance_calculator.get_current_benchmark_results() all_epochs_results.append(results_data) results_eval_train = eval(model_fn, data_iterator_train, prefix="train_") results_eval_test = eval(model_fn, data_iterator_test, prefix="test_") results_data.update(results_eval_train) results_data.update(results_eval_test) if save_checkpoint: checkpoint_manager.checkpoint.epoch.assign(epoch) checkpoint_manager.save() if hvd.rank() == 0: dllogger.log(data=results_data, step=(start_epoch + epoch,)) performance_calculator.init() # restart for another epoch elif performance_calculator.completed: break if benchmark: results_perf = performance_calculator.results if not performance_calculator.completed: # model steps have been exhausted or all steps should be included to calculate throughput results_perf = performance_calculator.get_current_benchmark_results() if hvd.rank() == 0: dllogger.log(data=results_perf, step=tuple()) else: # calculate convergence metrics time_to_train = sum([epoch_result['time'] for epoch_result in all_epochs_results]) results = {'time_to_train': time_to_train} results.update(results_eval_train) results.update(results_eval_test) if hvd.rank() == 0: dllogger.log(data=results, step=tuple()) def inference(model, data_iterator, benchmark, performance_calculator): """Forward pass for the model and data loader given.""" performance_calculator.init() for current_step, (input_data, targets) in enumerate(data_iterator): if benchmark and performance_calculator.completed: break model(input_data, training=False, compute_aux_loss=False) step_throughput = performance_calculator(len(targets)) dllogger.log(data={"samples/s": step_throughput}, step=(0, current_step)) results_perf = performance_calculator.results if not performance_calculator.completed: results_perf = performance_calculator.get_current_benchmark_results() if hvd.rank() == 0: dllogger.log(data=results_perf, step=tuple()) @click.command() @click.option( "--mode", default="train", help="Script mode: available options are 'train' to train and evaluate the model " "and 'inference' to perform forward pass over a given dataset", type=click.Choice(["train", "inference"]), ) @click.option( "--dataset_dir", required=True, help="Path to the dataset directory.", type=str, ) @click.option( "--feature_spec", default='feature_spec.yaml', help="Name of the feature spec file in the dataset directory.", type=str ) @click.option( "--results_dir", default="/tmp/sim", help="Path to the model files storage.", type=str, ) @click.option( "--log_filename", default="log.json", help="Name of the file to store dllogger output.", type=str, ) @click.option( "--long_seq_length", default=90, help="length of long history sequence", type=int ) @click.option( "--optimizer", default="adam", help="Optimizer to use [adam/lazy_adam/sgd].", type=click.Choice(["adam", "lazy_adam", "sgd"]), ) @click.option( "--affinity", default="socket_unique_interleaved", help="Type of CPU affinity", type=click.Choice([ "socket", "single", "single_unique", "socket_unique_interleaved", "socket_unique_continuous", "disabled", ], ), ) @click.option( "--seed", default=-1, help="Random seed.", type=int ) @click.option( "--lr", default=0.01, help="Learning rate of the selected optimizer.", type=float ) @click.option( "--dropout_rate", default=-1, help="Dropout rate for all the classification MLPs (default: -1, disabled).", type=float ) @click.option( "--weight_decay", default=0, help="Parameters decay of the selected optimizer.", type=float ) @click.option( "--embedding_dim", 
default=16, help="Embedding dimension.", type=int ) @click.option( "--global_batch_size", default=131072, help="Batch size used to train/eval the model.", type=int ) @click.option( "--num_parallel_calls", default=None, help="Parallelism level for tf.data API. If None, heuristic based on number of CPUs and number of GPUs will be used." ) @click.option( "--epochs", default=3, help="Train for the following number of epochs.", type=int ) @click.option("--disable_cache", help="disable dataset caching.", is_flag=True) @click.option("--drop_remainder", help="Drop remainder batch for training set.", is_flag=True) @click.option( "--repeat_count", default=0, help="Repeat training dataset this number of times.", type=int ) @click.option( "--benchmark", is_flag=True ) @click.option( "--benchmark_steps", default=0, help="Number of steps to use for performance benchmarking. Use benchmark_steps <= 0 to include all iterations. " "This parameter has no effect when the script is launched without --benchmark flag.", type=int ) @click.option( "--benchmark_warmup_steps", default=20, help="Number of warmup steps to use for performance benchmarking (benchmark_warmup_steps <= 0 means no warmup).", type=int ) @click.option( "--stage_one_mlp_dims", default="200", help="MLP hidden dimensions for stage one (excluding classification output).", type=str, ) @click.option( "--stage_two_mlp_dims", default="200,80", help="MLP hidden dimensions for stage two (excluding classification output).", type=str, ) @click.option( "--aux_mlp_dims", default="100,50", help="MLP hidden dimensions for aux loss (excluding classification output).", type=str, ) @click.option( "--model_type", default="sim", type=click.Choice(["sim", "din", "dien"]) ) @click.option("--save_checkpoint_path", default="", type=str) @click.option("--load_checkpoint_path", default="", type=str) @click.option("--amp", is_flag=True) @click.option("--xla", is_flag=True) @click.option( "--inter_op_parallelism", default=0, help="Number of inter op threads.", type=int ) @click.option( "--intra_op_parallelism", default=0, help="Number of intra op threads.", type=int ) @click.option( "--prefetch_train_size", default=10, help="Number of batches to prefetch in training. 
" ) @click.option( "--prefetch_test_size", default=2, help="Number of batches to prefetch in testing" ) @click.option( "--prebatch_train_size", default=0, help="Information about batch size applied during preprocessing to train dataset" ) @click.option( "--prebatch_test_size", default=0, help="Information about batch size applied during preprocessing to test dataset" ) def main( mode: str, dataset_dir: str, feature_spec: str, results_dir: str, log_filename: str, long_seq_length: int, save_checkpoint_path: str, load_checkpoint_path: str, model_type: str, optimizer: str, affinity: str, seed: int, lr: float, dropout_rate: float, weight_decay: float, embedding_dim: int, global_batch_size: int, num_parallel_calls: int, epochs: int, disable_cache: bool, drop_remainder: bool, repeat_count: int, benchmark: bool, benchmark_steps: int, benchmark_warmup_steps: int, stage_one_mlp_dims: str, stage_two_mlp_dims: str, aux_mlp_dims: str, xla: bool, amp: bool, inter_op_parallelism: int, intra_op_parallelism: int, prefetch_train_size: int, prefetch_test_size: int, prebatch_train_size: int, prebatch_test_size: int ): hvd.init() if seed >= 0: random.seed(seed) np.random.seed(seed) tf.random.set_seed(seed) if affinity != "disabled": gpu_id = hvd.local_rank() affinity = set_affinity( gpu_id=gpu_id, nproc_per_node=hvd.size(), mode=affinity ) dist_print(f"{gpu_id}: thread affinity: {affinity}") init_logger(results_dir, log_filename) gpus = tf.config.list_physical_devices('GPU') for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.set_visible_devices(gpus[hvd.local_rank()], 'GPU') if amp: tf.keras.mixed_precision.set_global_policy("mixed_float16") if inter_op_parallelism > 0: tf.config.threading.set_inter_op_parallelism_threads(inter_op_parallelism) if intra_op_parallelism > 0: tf.config.threading.set_intra_op_parallelism_threads(intra_op_parallelism) if xla: tf.config.optimizer.set_jit(True) if optimizer == "adam": optimizer = tfa.optimizers.AdamW(learning_rate=lr, weight_decay=weight_decay) elif optimizer == "lazy_adam": optimizer = tfa.optimizers.LazyAdam(lr) elif optimizer == "sgd": optimizer = tfa.optimizers.SGDW(learning_rate=lr, weight_decay=weight_decay) optimizer = hvd.DistributedOptimizer(optimizer, compression=hvd.Compression.fp16) if amp: optimizer = tf.keras.mixed_precision.LossScaleOptimizer(optimizer, dynamic=True) num_gpus = hvd.size() if global_batch_size % num_gpus != 0: raise ValueError('Global batch size must be divisible by number of gpus. Otherwise it may result in deadlock.') batch_size = global_batch_size // num_gpus """ In case of: - benchmark: we can load only 1 batch and operate on it for benchmark_steps times (in preload fashion). - training: we can repeat via a flag """ dataset_dir = Path(dataset_dir) feature_spec = FeatureSpec.from_yaml(dataset_dir / feature_spec) # since each tfrecord file must include all of the features, it is enough to read first chunk for each split. 
train_files = [dataset_dir / file for file in feature_spec.source_spec[TRAIN_MAPPING][0][FILES_SELECTOR]] data_iterator_train = get_data_iterator( train_files, feature_spec, batch_size, num_gpus, long_seq_length, repeat_count=repeat_count, drop_remainder=drop_remainder, amp=amp, disable_cache=disable_cache, prefetch_size=prefetch_train_size, num_parallel_calls=num_parallel_calls, prebatch_size=prebatch_train_size ) if mode == "train": test_files = [dataset_dir / file for file in feature_spec.source_spec[TEST_MAPPING][0][FILES_SELECTOR]] data_iterator_test = get_data_iterator( test_files, feature_spec, batch_size, num_gpus, long_seq_length, amp=amp, disable_cache=disable_cache, prefetch_size=prefetch_test_size, num_parallel_calls=num_parallel_calls, prebatch_size=prebatch_test_size ) else: data_iterator_test = [] # otherwise not used stage_one_mlp_dims = csv_str_to_int_list(stage_one_mlp_dims) stage_two_mlp_dims = csv_str_to_int_list(stage_two_mlp_dims) aux_mlp_dims = csv_str_to_int_list(aux_mlp_dims) model_params = { "feature_spec": feature_spec, "embedding_dim": embedding_dim, "mlp_hidden_dims": { "stage_1": stage_one_mlp_dims, "stage_2": stage_two_mlp_dims, "aux": aux_mlp_dims }, "dropout_rate": dropout_rate, "model_type": model_type } model, model_fn = build_model_and_loss(model_params) checkpoint_manager = init_checkpoint_manager( model, optimizer, save_checkpoint_path, load_checkpoint_path ) save_checkpoint = save_checkpoint_path != "" and hvd.rank() == 0 performance_calculator = PerformanceCalculator( benchmark_warmup_steps, benchmark_steps ) if mode == "train": train(model, model_fn, data_iterator_train, data_iterator_test, optimizer, amp, epochs, benchmark, performance_calculator, save_checkpoint, checkpoint_manager) elif mode == "inference": inference(model, data_iterator_train, benchmark, performance_calculator) if __name__ == "__main__": main()
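The evaluation path above accumulates logits and feeds them to tf.keras.metrics.AUC with from_logits=True, so no explicit sigmoid is applied. A synthetic, single-process illustration of that metric usage (toy values only):

# Illustrative sketch of the AUC bookkeeping used in eval() above.
import tensorflow as tf

auc = tf.keras.metrics.AUC(num_thresholds=8000, from_logits=True)
targets = tf.constant([[1.0], [0.0], [1.0], [0.0]])
logits = tf.constant([[2.3], [-1.1], [0.4], [0.2]])
auc.update_state(targets, logits)
print(auc.result().numpy())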
TensorFlow/Segmentation/UNet_3D_Medical/scripts
scripts
unet3d_train_single_TF-AMP
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script launches a single 3D-UNet TF-AMP training run on fold 0 for 16000 iterations.
# Usage:
# bash examples/unet3d_train_single_TF-AMP.sh <number/of/gpus> <path/to/dataset> <path/to/results/directory> <batch/size>

horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --exec_mode train_and_evaluate --augment --max_steps 16000 --batch_size $4 --xla --amp --fold 0
TensorFlow2/Segmentation/nnUNet/scripts
scripts
train
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from pathlib import Path from subprocess import call parser = ArgumentParser() parser.add_argument("--task", type=str, default="01", help="Task code") parser.add_argument("--dim", type=int, required=True, choices=[2, 3], help="Dimension of UNet") parser.add_argument("--gpus", type=int, default=1, help="Number of gpus") parser.add_argument("--seed", type=int, default=1, help="Random seed") parser.add_argument("--learning_rate", type=float, default=3e-4) parser.add_argument("--fold", type=int, required=True, choices=[0, 1, 2, 3, 4], help="Fold number") parser.add_argument("--amp", action="store_true", help="Enable automatic mixed precision") parser.add_argument("--tta", action="store_true", help="Enable test time augmentation") parser.add_argument("--horovod", action="store_true", help="Launch horovod within script") parser.add_argument("--bind", action="store_true", help="Bind CPUs for each GPU. Improves throughput for multi-GPU.") parser.add_argument("--results", type=Path, default=Path("/results"), help="Path to results directory") parser.add_argument("--logname", type=str, default="train_log.json", help="Name of the dlloger output") if __name__ == "__main__": args = parser.parse_args() skip = 100 if args.gpus == 1 else 150 path_to_main = Path(__file__).resolve().parent.parent / "main.py" cmd = f"horovodrun -np {args.gpus} " if args.horovod else "" if args.bind: cmd += "bindpcie --cpu=exclusive,nosmt " cmd += f"python {path_to_main} --exec-mode train --deep_supervision --xla --skip-eval {skip} " cmd += f"--task {args.task} " cmd += f"--dim {args.dim} " cmd += f"--epochs {300 if args.gpus == 1 else 600} " cmd += f"--batch-size {2 if args.dim == 3 else 64} " cmd += f"--learning_rate {args.learning_rate} " cmd += f"--fold {args.fold} " cmd += f"--amp {args.amp} " cmd += f"--tta {args.tta} " cmd += f"--results {args.results} " cmd += f"--logname {args.logname} " cmd += f"--gpus {args.gpus} " cmd += f"--seed {args.seed} " call(cmd, shell=True)
TensorFlow2/Recommendation/DLRM_and_DCNv2/nn
nn
evaluator
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # author: Tomasz Grel (tgrel@nvidia.com) import tensorflow as tf import time from .nn_utils import create_inputs_dict class Evaluator: def __init__(self, model, timer, auc_thresholds, max_steps=None, cast_dtype=None, distributed=False): self.model = model self.timer = timer self.max_steps = max_steps self.cast_dtype = cast_dtype self.distributed = distributed if self.distributed: import horovod.tensorflow as hvd self.hvd = hvd else: self.hvd = None self.auc_metric = tf.keras.metrics.AUC(num_thresholds=auc_thresholds, curve='ROC', summation_method='interpolation', from_logits=True) self.bce_op = tf.keras.losses.BinaryCrossentropy(reduction=tf.keras.losses.Reduction.NONE, from_logits=True) def _reset(self): self.latencies, self.all_test_losses = [], [] self.auc_metric.reset_state() @tf.function def update_auc_metric(self, labels, y_pred): self.auc_metric.update_state(labels, y_pred) @tf.function def compute_bce_loss(self, labels, y_pred): return self.bce_op(labels, y_pred) def _step(self, pipe): begin = time.time() batch = pipe.get_next() (numerical_features, categorical_features), labels = batch if self.cast_dtype is not None: numerical_features = tf.cast(numerical_features, self.cast_dtype) inputs = create_inputs_dict(numerical_features, categorical_features) y_pred = self.model(inputs, sigmoid=False, training=False) end = time.time() self.latencies.append(end - begin) if self.distributed: y_pred = self.hvd.allgather(y_pred) labels = self.hvd.allgather(labels) self.timer.step_test() if not self.distributed or self.hvd.rank() == 0: self.update_auc_metric(labels, y_pred) test_loss = self.compute_bce_loss(labels, y_pred) self.all_test_losses.append(test_loss) def __call__(self, validation_pipeline): self._reset() auc, test_loss = 0, 0 pipe = iter(validation_pipeline.op()) num_steps = len(validation_pipeline) if self.max_steps is not None and self.max_steps >= 0: num_steps = min(num_steps, self.max_steps) for _ in range(num_steps): self._step(pipe) if not self.distributed or self.hvd.rank() == 0: auc = self.auc_metric.result().numpy().item() test_loss = tf.reduce_mean(self.all_test_losses).numpy().item() return auc, test_loss, self.latencies
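Evaluator.__call__ above also returns per-step latencies; a small, illustrative way to turn them into percentile statistics (the percentile set is an example, not necessarily what the training harness logs):

# Illustrative sketch: summarize per-step latencies returned by the Evaluator.
import numpy as np

def latency_summary(latencies, percentiles=(50, 90, 95, 99)):
    millis = np.asarray(latencies) * 1000.0  # seconds -> milliseconds
    return {f"latency_p{p}": float(np.percentile(millis, p)) for p in percentiles}

print(latency_summary([0.012, 0.011, 0.013, 0.030, 0.012]))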
TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library
library
onnx2trt_conv
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import Dict, Iterable, Optional # pytype: disable=import-error import onnx import tensorrt as trt from ..core import BaseConverter, Format, Model, Precision, ShapeSpec from ..extensions import converters from .utils import get_input_shapes # pytype: enable=import-error LOGGER = logging.getLogger(__name__) TRT_LOGGER = trt.Logger(trt.Logger.INFO) class Onnx2TRTConverter(BaseConverter): def __init__(self, *, max_batch_size: int, max_workspace_size: int, precision: str): self._max_batch_size = max_batch_size self._max_workspace_size = max_workspace_size self._precision = Precision(precision) def convert(self, model: Model, dataloader_fn) -> Model: input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size) cuda_engine = onnx2trt( model.handle, shapes=input_shapes, max_workspace_size=self._max_workspace_size, max_batch_size=self._max_batch_size, model_precision=self._precision.value, ) return model._replace(handle=cuda_engine) @staticmethod def required_source_model_precision(requested_model_precision: Precision) -> Precision: # TensorRT requires source models to be in FP32 precision return Precision.FP32 def onnx2trt( onnx_model: onnx.ModelProto, *, shapes: Dict[str, ShapeSpec], max_workspace_size: int, max_batch_size: int, model_precision: str, ) -> "trt.ICudaEngine": """ Converts onnx model to TensorRT ICudaEngine Args: onnx_model: onnx.Model to convert shapes: dictionary containing min shape, max shape, opt shape for each input name max_workspace_size: The maximum GPU temporary memory which the CudaEngine can use at execution time. max_batch_size: The maximum batch size which can be used at execution time, and also the batch size for which the CudaEngine will be optimized. model_precision: precision of kernels (possible values: fp16, fp32) Returns: TensorRT ICudaEngine """ # Whether or not 16-bit kernels are permitted. # During :class:`ICudaEngine` build fp16 kernels will also be tried when this mode is enabled. fp16_mode = "16" in model_precision builder = trt.Builder(TRT_LOGGER) builder.fp16_mode = fp16_mode builder.max_batch_size = max_batch_size builder.max_workspace_size = max_workspace_size # In TensorRT 7.0, the ONNX parser only supports full-dimensions mode, # meaning that your network definition must be created with the explicitBatch flag set. 
# For more information, see # https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#work_dynamic_shapes flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) network = builder.create_network(flags) with trt.OnnxParser(network, TRT_LOGGER) as parser: # onnx model parsing if not parser.parse(onnx_model.SerializeToString()): for i in range(parser.num_errors): LOGGER.error(f"OnnxParser error {i}/{parser.num_errors}: {parser.get_error(i)}") raise RuntimeError("Error during parsing ONNX model (see logs for details)") # optimization config = builder.create_builder_config() config.flags |= bool(fp16_mode) << int(trt.BuilderFlag.FP16) config.max_workspace_size = max_workspace_size profile = builder.create_optimization_profile() for name, spec in shapes.items(): profile.set_shape(name, **spec._asdict()) config.add_optimization_profile(profile) engine = builder.build_engine(network, config=config) return engine converters.register_extension(f"{Format.ONNX.value}--{Format.TRT.value}", Onnx2TRTConverter)
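The shapes argument of onnx2trt above is a dict of per-input specs whose _asdict() must line up with profile.set_shape(name, min=..., opt=..., max=...). The real ShapeSpec namedtuple is defined in ..core; the sketch below re-declares it with assumed field names purely to show the expected structure.

# Illustrative sketch only; field names (min/opt/max) are an assumption.
from collections import namedtuple

ShapeSpec = namedtuple("ShapeSpec", ["min", "opt", "max"])

shapes = {
    "input": ShapeSpec(min=(1, 224, 224, 3), opt=(16, 224, 224, 3), max=(32, 224, 224, 3)),
}
for name, spec in shapes.items():
    print(name, spec._asdict())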
TensorFlow/LanguageModeling/BERT/data
data
check
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv o = csv.reader(open("data/biobert/chemprot-data_treeLSTM/dev.tsv", "r"), delimiter="\t") nv = csv.reader(open("data/biobert/ChemProt_NV/dev.tsv", "r"), delimiter="\t") count = {} for l, i in enumerate(nv): if l == 0: continue if count.get(i[0].split(".")[0], None) is None: count[i[0].split(".")[0]] = 0 count[i[0].split(".")[0]] += 1 count_1 = {} for i in o: if count_1.get(i[0], None) is None: count_1[i[0]] = 0 count_1[i[0]] += 1 for k in count.keys(): if count[k] != count_1[k]: print(k, count[k], count_1[k]) # import os # import csv # import zipfile # import argparse # class ChemProtTextFormatting: # """A basic formatter to preprocess the chemprot dataset. # """ # def __init__(self, input_folder, output_folder): # chemprot_folder = input_folder # with zipfile.ZipFile(os.path.join(chemprot_folder, "ChemProt_Corpus.zip"), "r") as zip: # zip.extractall(chemprot_folder) # chemprot_folder = os.path.join(input_folder, "ChemProt_Corpus") # with zipfile.ZipFile(os.path.join(chemprot_folder, "chemprot_development.zip")) as zip: # zip.extractall(chemprot_folder) # if not os.path.exists(output_folder): # os.makedirs(output_folder) # self.format(os.path.join(chemprot_folder, "chemprot_development"), # "chemprot_development_entities.tsv", "chemprot_development_relations.tsv", # "chemprot_development_abstracts.tsv", os.path.join(output_folder, "dev.tsv")) # with zipfile.ZipFile(os.path.join(chemprot_folder, "chemprot_test_gs.zip")) as zip: # zip.extractall(chemprot_folder) # self.format(os.path.join(chemprot_folder, "chemprot_test_gs"), # "chemprot_test_entities_gs.tsv", "chemprot_test_relations_gs.tsv", # "chemprot_test_abstracts_gs.tsv", os.path.join(output_folder, "test.tsv")) # with zipfile.ZipFile(os.path.join(chemprot_folder, "chemprot_training.zip")) as zip: # zip.extractall(chemprot_folder) # self.format(os.path.join(chemprot_folder, "chemprot_training"), # "chemprot_training_entities.tsv", "chemprot_training_relations.tsv", # "chemprot_training_abstracts.tsv", os.path.join(output_folder, "train.tsv")) # def format(self, chemprot_path, entity_filename, relations_filename, abstracts_filename, output_filename): # """ # Constructs ChemProt dataset for Relation Extraction. # Args: # chemprot_path: Path to files # entity_filename: Contains labelled mention annotations of chemical compounds and genes/proteins. 
# <PMID> <EntityNumber> <Type of Entity> <Start Character offset> <End Character Offset> <Text String> # relations_filename: Contains a subset of chemical-protein relations annotations for the Chemprot dataset # <PMID> <CPR Group> <EntityNumber1> <EntityNumber2> # abstracts_filename: Contains plain text CHEMPROT PubMed Data # <PMID> <Title of the Article> <Abstract of the Article> # output_filename: Path to output file that will contain preprocessed data # <PMID.EntityNumber1.EntityNumber2> <Preprocessed Sentence> <CPR Group> # """ # data = {} # train_entities = csv.reader(open(os.path.join(chemprot_path, entity_filename), # mode="r"), delimiter="\t") # for entity in train_entities: # id = entity[0] # if data.get(id, None) is None: # data[id] = {"relations":[], "entities":{}} # data[id]["entities"][entity[1]] = (int(entity[3]), int(entity[4]), entity[2]) # train_relations=csv.reader(open(os.path.join(chemprot_path, relations_filename), # mode="r"), delimiter="\t") # for relation in train_relations: # try: # id = relation[0] # data[id]["relations"].append((relation[1], relation[2], relation[4].split("Arg1:")[-1], relation[5].split("Arg2:")[-1])) # except: # print("invalid id") # raise ValueError # with open(output_filename, 'w') as ofile: # train_abstracts = csv.reader(open(os.path.join(chemprot_path, abstracts_filename), # mode="r"), delimiter="\t") # owriter = csv.writer(ofile, delimiter='\t', lineterminator=os.linesep) # owriter.writerow(["index", "sentence", "label"]) # num_sentences = 0 # rejected = 0 # for abstract in train_abstracts: # id = abstract[0] # line = abstract[1] + abstract[2] # for relation in data[id]["relations"]: # tag1 = relation[2] # tag2 = relation[3] # start = 0 # for sentence in line.split("."): # end = start + len(sentence) # if data[id]["entities"][tag1][0] >= start and data[id]["entities"][tag2][0] >= start and \ # data[id]["entities"][tag1][1] <= end and data[id]["entities"][tag2][1] <= end: # for offset_start, offset_end, word in sorted([(data[id]["entities"][tag1][0], data[id]["entities"][tag1][1], data[id]["entities"][tag1][2]), # (data[id]["entities"][tag2][0], data[id]["entities"][tag2][1], data[id]["entities"][tag2][2])], # reverse=True): # sentence = sentence[:offset_start-start-1] + "@" + word + "$" + sentence[offset_end-start-1:] # sentence = sentence.strip() # owriter.writerow([id+"."+tag1+"."+tag2, sentence, relation[0] if relation[1] == "Y " else "false"]) # num_sentences += 1 # if id == "10064839": # print(tag1, tag2, start, end, offset_start, offset_end, "yes") # break # else: # rejected += 1 # if id == "10064839": # print(tag1, tag2, start, end, data[id]["entities"][tag1][0], data[id]["entities"][tag1][1], data[id]["entities"][tag2][0], data[id]["entities"][tag2][1]) # start = end + 1 # print("Succesfully written {} samples to {}".format(num_sentences, output_filename)) # print("Rejected are", rejected) # if __name__=="__main__": # parser = argparse.ArgumentParser( # description='Preprocessing Application for ChemProt' # ) # parser.add_argument( # '--input_folder', # type=str, # help='Specify the input files in a comma-separated list (no spaces)' # ) # parser.add_argument( # '--output_folder', # type=str, # help='Specify the input files in a comma-separated list (no spaces)' # ) # args = parser.parse_args() # preprocess_chemprot = ChemProtTextFormatting(args.input_folder, args.output_folder) # # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # # you may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # import os # import csv # import zipfile # import argparse # class ChemProtTextFormatting: # """A basic formatter to preprocess the chemprot dataset. # """ # def __init__(self, input_folder, output_folder): # chemprot_folder = input_folder # with zipfile.ZipFile(os.path.join(chemprot_folder, "ChemProt_Corpus.zip"), "r") as zip: # zip.extractall(chemprot_folder) # chemprot_folder = os.path.join(input_folder, "ChemProt_Corpus") # with zipfile.ZipFile(os.path.join(chemprot_folder, "chemprot_development.zip")) as zip: # zip.extractall(chemprot_folder) # if not os.path.exists(output_folder): # os.makedirs(output_folder) # self.format(os.path.join(chemprot_folder, "chemprot_development"), # "chemprot_development_entities.tsv", "chemprot_development_relations.tsv", # "chemprot_development_abstracts.tsv", os.path.join(output_folder, "dev.tsv")) # with zipfile.ZipFile(os.path.join(chemprot_folder, "chemprot_test_gs.zip")) as zip: # zip.extractall(chemprot_folder) # self.format(os.path.join(chemprot_folder, "chemprot_test_gs"), # "chemprot_test_entities_gs.tsv", "chemprot_test_relations_gs.tsv", # "chemprot_test_abstracts_gs.tsv", os.path.join(output_folder, "test.tsv")) # with zipfile.ZipFile(os.path.join(chemprot_folder, "chemprot_training.zip")) as zip: # zip.extractall(chemprot_folder) # self.format(os.path.join(chemprot_folder, "chemprot_training"), # "chemprot_training_entities.tsv", "chemprot_training_relations.tsv", # "chemprot_training_abstracts.tsv", os.path.join(output_folder, "train.tsv")) # def format(self, chemprot_path, entity_filename, relations_filename, abstracts_filename, output_filename): # """ # Constructs ChemProt dataset for Relation Extraction. # Args: # chemprot_path: Path to files # entity_filename: Contains labelled mention annotations of chemical compounds and genes/proteins. 
# <PMID> <EntityNumber> <Type of Entity> <Start Character offset> <End Character Offset> <Text String> # relations_filename: Contains a subset of chemical-protein relations annotations for the Chemprot dataset # <PMID> <CPR Group> <EntityNumber1> <EntityNumber2> # abstracts_filename: Contains plain text CHEMPROT PubMed Data # <PMID> <Title of the Article> <Abstract of the Article> # output_filename: Path to output file that will contain preprocessed data # <PMID.EntityNumber1.EntityNumber2> <Preprocessed Sentence> <CPR Group> # """ # data = {} # train_entities = csv.reader(open(os.path.join(chemprot_path, entity_filename), # mode="r"), delimiter="\t") # for entity in train_entities: # id = entity[0] # if data.get(id, None) is None: # data[id] = {"relations": {}, "entities": {"CHEMICAL": {"00": (0, 0, None)}, "GENE": {}}} # data[id]["entities"]["CHEMICAL" if entity[2] == "CHEMICAL" else "GENE"][entity[1]] = ( # int(entity[3]), int(entity[4]), entity[2]) # train_relations = csv.reader(open(os.path.join(chemprot_path, relations_filename), # mode="r"), delimiter="\t") # for relation in train_relations: # try: # id = relation[0] # data[id]["relations"][(relation[4].split("Arg1:")[-1], relation[5].split("Arg2:")[-1])] = relation[ # 1] if relation[2] == "Y " else "false" # except: # print("invalid id") # raise ValueError # # print(data[list(data.keys())[0]]) # with open(output_filename, 'w') as ofile: # train_abstracts = csv.reader(open(os.path.join(chemprot_path, abstracts_filename), # mode="r"), delimiter="\t") # owriter = csv.writer(ofile, delimiter='\t', lineterminator=os.linesep) # owriter.writerow(["index", "sentence", "label"]) # num_sentences = 0 # rejected = 0 # for abstract in train_abstracts: # id = abstract[0] # line = abstract[1] + abstract[2] # for tag1 in data[id]["entities"]["CHEMICAL"].keys(): # for tag2 in data[id]["entities"]["GENE"].keys(): # relation = data[id]["relations"].get((tag2, tag1), None) # relation = data[id]["relations"].get((tag1, tag2), None) if relation is None else relation # if relation is None: # relation = "false" # start = 0 # for sentence in line.split("."): # original_sentence = sentence # end = start + len(sentence) # tag1_details = data[id]["entities"]["CHEMICAL"][tag1] # tag2_details = data[id]["entities"]["GENE"][tag2] # if ((tag1_details[2] is None) or ( # tag1_details[0] >= start and tag1_details[1] <= end)) and \ # (tag2_details[0] >= start and tag2_details[1] <= end): # for offset_start, offset_end, value in sorted( # list(data[id]["entities"]["CHEMICAL"].values()) + list( # data[id]["entities"]["GENE"].values()), # reverse=True): # if offset_start < start or offset_end > end or value is None: # continue # word = value if (offset_start, offset_end) == ( # tag1_details[0], tag1_details[1]) or (offset_start, offset_end) == ( # tag2_details[0], tag2_details[1]) else "OTHER" # sentence = sentence[:offset_start - start - 1] + "@" + word + "$" + sentence[ # offset_end - start - 1:] # sentence = sentence.strip() # owriter.writerow([id + "." + tag1 + "." 
+ tag2, sentence, relation]) # num_sentences += 1 # # if id == list(data.keys())[0]: # # print(original_sentence, sentence) # # break # else: # rejected += 1 # if id == "10064839": # # print(tag1, tag2, start, end, tag1_details[0], tag1_details[1], tag2_details[0], tag2_details[1]) # pass # start = end + 1 # print("Succesfully written {} samples to {}".format(num_sentences, output_filename)) # print("Rejected are", rejected) # if __name__ == "__main__": # parser = argparse.ArgumentParser( # description='Preprocessing Application for ChemProt' # ) # parser.add_argument( # '--input_folder', # type=str, # help='Specify the input files in a comma-separated list (no spaces)' # ) # parser.add_argument( # '--output_folder', # type=str, # help='Specify the input files in a comma-separated list (no spaces)' # ) # args = parser.parse_args() # preprocess_chemprot = ChemProtTextFormatting(args.input_folder, args.output_folder)
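The live code at the top of this file (everything not commented out) simply compares per-document sentence counts between two dev.tsv files. A compact, generic rewrite of that check using collections.Counter, keeping the same assumed paths and column layout:

# Illustrative rewrite of the count comparison above (paths/columns assumed from the code).
import csv
from collections import Counter

def doc_counts(path, id_from_row, skip_header=False):
    with open(path, "r") as f:
        rows = csv.reader(f, delimiter="\t")
        if skip_header:
            next(rows, None)
        return Counter(id_from_row(row) for row in rows)

nv_counts = doc_counts("data/biobert/ChemProt_NV/dev.tsv",
                       lambda row: row[0].split(".")[0], skip_header=True)
orig_counts = doc_counts("data/biobert/chemprot-data_treeLSTM/dev.tsv",
                         lambda row: row[0])
for doc_id, n in nv_counts.items():
    if orig_counts.get(doc_id) != n:
        print(doc_id, n, orig_counts.get(doc_id))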
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cuda
cuda
match_proposals
/** * Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <torch/torch.h> #include <vector> #include <iostream> __launch_bounds__(256) static __global__ void max_along_gt_idx(float *match, unsigned char *pred_forgiven, long *max_gt_idx, long long gt,long long preds, bool include_low_quality, float low_th, float high_th) { long long tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid < preds){ float max_iou = 0.0f; int max_idx = 0; float iou; for(long long i = 0;i < gt; i++){ iou = match[i * preds + tid]; if (iou > max_iou) {max_iou = iou; max_idx = i;} } if (max_iou >= high_th) max_gt_idx[tid] = max_idx; else if ((pred_forgiven[tid] == 1 && include_low_quality)) max_gt_idx[tid] = max_idx; else if (max_iou < low_th) max_gt_idx[tid] = -1; else if (max_iou < high_th) max_gt_idx[tid] = -2; } } __device__ void warpReduce(volatile float* sdata, int tid) { sdata[tid] = fmax(sdata[tid],sdata[tid + 32]); sdata[tid] = fmax(sdata[tid],sdata[tid + 16]); sdata[tid] = fmax(sdata[tid],sdata[tid + 8]); sdata[tid] = fmax(sdata[tid],sdata[tid + 4]); sdata[tid] = fmax(sdata[tid],sdata[tid + 2]); sdata[tid] = fmax(sdata[tid],sdata[tid + 1]); } static __global__ void max_along_preds(float* match, float* inter_gt, long long gt,long long preds) { int gt_idx = blockIdx.x; int chunk_idx = blockIdx.y; int gt_offset = chunk_idx * 2048; int start_idx = gt_idx * preds + gt_offset; int idx = threadIdx.x; __shared__ float shbuf[1024]; shbuf[idx] = 0.0f; __syncthreads(); if(gt_offset + idx + 1024 < preds) shbuf[idx] = fmax(match[start_idx + idx], match[start_idx + idx + 1024]); else if (gt_offset + idx < preds) shbuf[idx] = match[start_idx + idx]; __syncthreads(); if(idx < 512) shbuf[idx] = fmax(shbuf[idx],shbuf[idx + 512]); __syncthreads(); if(idx < 256) shbuf[idx] = fmax(shbuf[idx], shbuf[idx + 256]); __syncthreads(); if(idx < 128) shbuf[idx] = fmax(shbuf[idx], shbuf[idx + 128]); __syncthreads(); if(idx < 64) shbuf[idx] = fmax(shbuf[idx], shbuf[idx + 64]); __syncthreads(); if(idx < 32) warpReduce(shbuf, idx); if (idx == 0) inter_gt[((preds + 2047) / 2048) * gt_idx + chunk_idx] = shbuf[idx]; } __launch_bounds__(256) static __global__ void max_along_preds_reduced(float *match, float *max_preds, long long gt,long long preds) { long long tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < gt){ float max_iou = 0.0f; float iou; for(long long i = 0; i < preds; i++){ iou = match[tid * preds + i]; if (iou > max_iou) max_iou = iou; } max_preds[tid] = max_iou; } } __launch_bounds__(256) static __global__ void forgive_preds(float *match_quality_data, float *d_best_pred_per_gt, unsigned char *d_pred_forgiven, long gt, long preds) { long tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < preds){ unsigned char forgiven = 0; float iou; for(int i = 0; i < gt; i++){ iou = match_quality_data[i * preds + tid]; if(iou == d_best_pred_per_gt[i]){ forgiven = 1; break; } } d_pred_forgiven[tid] = forgiven; } 
} at::Tensor match_proposals_cuda(at::Tensor match_quality_matrix, bool allow_low_quality_matches, float low_th, float high_th){ int gt = match_quality_matrix.size(0); long long preds = match_quality_matrix.size(1); float *match_quality_data = match_quality_matrix.data_ptr<float>(); using namespace at; //predictions are reduced by chunks of 2048 elements per block int num_chunks = (preds + 2047) / 2048; auto result = torch::ones({preds}, torch::CUDA(at::kLong)); at::Tensor best_pred_per_gt = torch::zeros({gt}, torch::CUDA(at::kFloat)); at::Tensor pred_forgiven = torch::zeros({preds}, torch::CUDA(at::kByte)); at::Tensor intergt = torch::zeros({gt * num_chunks}, torch::CUDA(at::kFloat)); auto stream = at::cuda::getCurrentCUDAStream(); //do an intermediate reduction along all predictions for each gt dim3 block(1024, 1, 1); dim3 grid(gt, num_chunks, 1); if (allow_low_quality_matches) max_along_preds<<<grid, block, 0, stream.stream()>>>( match_quality_matrix.data_ptr<float>(), intergt.data_ptr<float>(), gt, preds); //final reduction to find best iou per gt int numThreads = 256; int numBlocks=(gt + numThreads - 1) / numThreads; if (allow_low_quality_matches) max_along_preds_reduced<<<numBlocks, numThreads, 0, stream.stream()>>>( intergt.data_ptr<float>(), best_pred_per_gt.data_ptr<float>(), gt, num_chunks); numBlocks=(preds + numThreads - 1) / numThreads; //if low_quality_matches are allowed, mark some predictions to keep their best matching gt even though //iou < threshold if (allow_low_quality_matches) forgive_preds<<<numBlocks, numThreads, 0, stream.stream()>>>( match_quality_matrix.data_ptr<float>(), best_pred_per_gt.data_ptr<float>(), pred_forgiven.data_ptr<unsigned char>(), gt, preds); //compute resulting tensor of indices max_along_gt_idx<<<numBlocks, numThreads, 0, stream.stream()>>>(match_quality_matrix.data_ptr<float>(), pred_forgiven.data_ptr<unsigned char>(), result.data_ptr<long>(), gt, preds, allow_low_quality_matches, low_th, high_th); return result; }
PyTorch/SpeechSynthesis/Tacotron2/waveglow
waveglow
data_function
# *****************************************************************************
#  Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
#  Redistribution and use in source and binary forms, with or without
#  modification, are permitted provided that the following conditions are met:
#      * Redistributions of source code must retain the above copyright
#        notice, this list of conditions and the following disclaimer.
#      * Redistributions in binary form must reproduce the above copyright
#        notice, this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
#      * Neither the name of the NVIDIA CORPORATION nor the
#        names of its contributors may be used to endorse or promote products
#        derived from this software without specific prior written permission.
#
#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#  ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#  WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#  DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
#  DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#  (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#  ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************

import torch
import tacotron2_common.layers as layers
from tacotron2_common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu


class MelAudioLoader(torch.utils.data.Dataset):
    """
        1) loads audio,text pairs
        2) computes mel-spectrograms from audio files.
    """

    def __init__(self, dataset_path, audiopaths_and_text, args):
        self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)
        self.max_wav_value = args.max_wav_value
        self.sampling_rate = args.sampling_rate
        self.stft = layers.TacotronSTFT(
            args.filter_length, args.hop_length, args.win_length,
            args.n_mel_channels, args.sampling_rate, args.mel_fmin,
            args.mel_fmax)
        self.segment_length = args.segment_length

    def get_mel_audio_pair(self, filename):
        audio, sampling_rate = load_wav_to_torch(filename)

        if sampling_rate != self.stft.sampling_rate:
            raise ValueError("{} SR doesn't match target {} SR".format(
                sampling_rate, self.stft.sampling_rate))

        # Take segment
        if audio.size(0) >= self.segment_length:
            max_audio_start = audio.size(0) - self.segment_length
            audio_start = torch.randint(0, max_audio_start + 1, size=(1,)).item()
            audio = audio[audio_start:audio_start+self.segment_length]
        else:
            audio = torch.nn.functional.pad(
                audio, (0, self.segment_length - audio.size(0)), 'constant').data

        audio = audio / self.max_wav_value
        audio_norm = audio.unsqueeze(0)
        audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
        melspec = self.stft.mel_spectrogram(audio_norm)
        melspec = melspec.squeeze(0)

        return (melspec, audio, len(audio))

    def __getitem__(self, index):
        return self.get_mel_audio_pair(self.audiopaths_and_text[index][0])

    def __len__(self):
        return len(self.audiopaths_and_text)


def batch_to_gpu(batch):
    x, y, len_y = batch
    x = to_gpu(x).float()
    y = to_gpu(y).float()
    len_y = to_gpu(torch.sum(len_y))
    return ((x, y), y, len_y)
PyTorch/Translation
Translation
README
# Machine Translation
Machine Translation is the task of translating text from one language to another. Simply replacing each word with its equivalent in the other language rarely produces a semantically meaningful translation, because that ignores phrase-level meaning entirely. A good machine translation system needs to model whole phrases or sentences. Neural networks have enabled end-to-end architectures that can do this, mapping input text directly to the corresponding output text. A good model should also handle challenges such as morphologically rich languages and very large vocabularies while keeping training and inference times reasonable. This collection contains state-of-the-art models and containers that can help with the task of Machine Translation.

In this collection, we will cover:
- Challenges in Machine Translation
- Model architecture
- Where to get started

---
## Challenges in Machine Translation
Translating text from an unfamiliar language used to be slow and laborious. Simple word-for-word translation with a dictionary is difficult for two reasons: 1) the user has to know the grammar rules of both languages, and 2) they must keep all the intermediate transcriptions in mind while translating the whole sentence. Today we can translate phrases, sentences, and even long texts simply by pasting them into Google Translate. A translation system cannot simply store a translation for every possible sentence, because the number of variations is enormous. A more practical approach is to teach the machine the grammar rules of a language and translate according to them. If only it were as easy as it sounds. Anyone who has tried to learn a foreign language knows that there are always many exceptions to the rules; when we try to capture all of these rules, restrictions, and exceptions to the exceptions in a program, translation quality breaks down.

---
## Model architecture
i) Google’s Neural Machine Translation:
Sequence-to-Sequence (seq2seq) models are used for many Natural Language Processing (NLP) tasks, such as text summarization, speech recognition, and nucleotide sequence modeling. Here the goal is to translate the given sentences from one language to another, so both the input and the output are sentences; in other words, sequences of words flow into and out of the network. That is the essence of Sequence-to-Sequence modeling. The figure below illustrates this technique.

![Basic Architecture](img/6_machine-translation-figure-1.png)

Source - https://developer.nvidia.com/blog/introduction-neural-machine-translation-with-gpus/

The GNMT v2 model is similar to the one described in the [Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation](https://arxiv.org/abs/1609.08144) paper. The most important difference between the two models is in the attention mechanism. In the v2 model, the output of the decoder's first LSTM layer goes into the attention module; the re-weighted attention context is then concatenated with the inputs to all subsequent LSTM layers in the decoder at the current time step (a minimal sketch of this attention flow is provided at the end of this README).

![Basic Architecture](img/6_machine-translation-figure-2.png)

ii) Transformer based Neural Machine Translation:
The Transformer model uses the standard NMT encoder-decoder architecture. Unlike other NMT models, it uses no recurrent connections and operates on a fixed-size context window.
The encoder stack is made up of N identical layers. Each layer is composed of the following sublayers:
1. Self-attention layer
2. Feedforward network (two fully-connected layers)

Like the encoder stack, the decoder stack is made up of N identical layers. Each layer is composed of the following sublayers:
1. Self-attention layer
2. Multi-headed attention layer combining encoder outputs with outputs from the previous self-attention layer
3. Feedforward network (two fully-connected layers)

The encoder uses self-attention to compute a representation of the input sequence. The decoder generates the output sequence one token at a time, taking the encoder output and the previously generated decoder output tokens as inputs. The model also applies embeddings to the input and output tokens and adds a fixed positional encoding, which carries information about the position of each token. Minimal sketches of the self-attention and positional-encoding computations are provided at the end of this README.

![Basic Architecture](img/6_machine-translation-figure-3.png)

Source - [Attention is all you Need](https://arxiv.org/abs/1706.03762)

---
## Where to get started
NVIDIA provides Deep Learning Examples for Machine Translation on its GitHub repository. These examples provide you with easy-to-consume, highly optimized scripts for both training and inference. The quick start guide at our GitHub repository will help you set up the environment using NGC Docker images, download pre-trained models from NGC, and adapt the model training and inference for your application/use case.

Here are the examples relevant for machine translation, directly from [Deep Learning Examples](https://github.com/NVIDIA/DeepLearningExamples):

1. Machine translation with GNMT using TensorFlow
- [Git repository](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Translation/GNMT)
- Uses TensorFlow 20.06-tf1-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-tensorflow)

2. Machine translation with Transformers using PyTorch
- [Git repository](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Translation/Transformer)
- Uses PyTorch 20.03-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch)
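To make the GNMT v2 attention flow described above concrete, here is a minimal, hedged sketch of a single decoder time step in PyTorch. This is not the implementation used in this repository: the layer objects, the `attn_score` scoring helper, and the omission of residual connections and dropout are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def gnmt_v2_decoder_step(x_t, enc_out, lstm0, lstm_rest, attn_score, states):
    """One GNMT v2-style decoder time step (sketch only).

    x_t:        (batch, emb)        embedded input token at the current step
    enc_out:    (batch, src_len, h) encoder outputs
    lstm0:      first decoder LSTMCell
    lstm_rest:  list of the remaining decoder LSTMCells
    attn_score: hypothetical helper scoring a (batch, h) query against
                enc_out and returning (batch, src_len) scores
    states:     list of (h, c) tuples, one per decoder layer
    """
    # 1) The output of the first LSTM layer is the attention query.
    h0, c0 = lstm0(x_t, states[0])
    states[0] = (h0, c0)

    # 2) Attention re-weights the encoder outputs into a context vector.
    weights = F.softmax(attn_score(h0, enc_out), dim=-1)           # (batch, src_len)
    context = torch.bmm(weights.unsqueeze(1), enc_out).squeeze(1)  # (batch, h)

    # 3) The context is concatenated with the input of every subsequent layer
    #    at the current time step.
    h = h0
    for i, cell in enumerate(lstm_rest, start=1):
        h, c = cell(torch.cat([h, context], dim=-1), states[i])
        states[i] = (h, c)
    return h, states
```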
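The self-attention sublayer of the Transformer can be summarized with scaled dot-product attention. The sketch below is a single-head simplification under assumed tensor names; the actual Transformer implementation uses multi-head attention with learned projection layers, masking, and dropout.

```python
import math
import torch

def scaled_dot_product_self_attention(x, w_q, w_k, w_v):
    """Single-head self-attention sketch: x is (batch, seq_len, d_model)."""
    q = x @ w_q  # queries, (batch, seq_len, d_k)
    k = x @ w_k  # keys,    (batch, seq_len, d_k)
    v = x @ w_v  # values,  (batch, seq_len, d_k)
    d_k = q.size(-1)
    # Attention weights: softmax of scaled dot products between queries and keys.
    scores = q @ k.transpose(-2, -1) / math.sqrt(d_k)  # (batch, seq_len, seq_len)
    weights = torch.softmax(scores, dim=-1)
    # Each output position is a weighted sum of the value vectors.
    return weights @ v

# Example: a batch of 2 sequences of length 5 with d_model = 8.
x = torch.randn(2, 5, 8)
w_q, w_k, w_v = (torch.randn(8, 8) for _ in range(3))
out = scaled_dot_product_self_attention(x, w_q, w_k, w_v)
print(out.shape)  # torch.Size([2, 5, 8])
```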
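The fixed positional encoding is typically the sinusoidal scheme from the Attention Is All You Need paper. The sketch below follows that formulation; the constant 10000 and the sine/cosine interleaving come from the paper, not from a specific file in this repository.

```python
import math
import torch

def sinusoidal_positional_encoding(seq_len, d_model):
    """Returns a (seq_len, d_model) tensor of fixed positional encodings."""
    position = torch.arange(seq_len, dtype=torch.float32).unsqueeze(1)  # (seq_len, 1)
    div_term = torch.exp(
        torch.arange(0, d_model, 2, dtype=torch.float32) * (-math.log(10000.0) / d_model))
    pe = torch.zeros(seq_len, d_model)
    pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions
    pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions
    return pe

# The encoding is simply added to the token embeddings before the first layer.
embeddings = torch.randn(5, 512)  # (seq_len, d_model) token embeddings
inputs = embeddings + sinusoidal_positional_encoding(5, 512)
```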
PyTorch/SpeechRecognition/Jasper/triton/model_repo_configs/fp16/jasper-ts-trace-ensemble
jasper-ts-trace-ensemble
config
name: "jasper-ts-trace-ensemble" platform: "ensemble" max_batch_size: 8#MAX_BATCH input { name: "AUDIO_SIGNAL" data_type: TYPE_FP16 dims: -1#AUDIO_LENGTH } input { name: "NUM_SAMPLES" data_type: TYPE_INT32 dims: [ 1 ] } output { name: "TRANSCRIPT" data_type: TYPE_INT32 dims: [-1] } ensemble_scheduling { step { model_name: "feature-extractor-ts-trace" model_version: -1 input_map { key: "input__0" value: "AUDIO_SIGNAL" } input_map { key: "input__1" value: "NUM_SAMPLES" } output_map { key: "output__0" value: "AUDIO_FEATURES" } } step { model_name: "jasper-ts-trace" model_version: -1 input_map { key: "input__0" value: "AUDIO_FEATURES" } output_map { key: "output__0" value: "CHARACTER_PROBABILITIES" } } step { model_name: "decoder-ts-script" model_version: -1 input_map { key: "input__0" value: "CHARACTER_PROBABILITIES" } output_map { key: "output__0" value: "TRANSCRIPT" } } }
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/trtis_client
trtis_client
run_trtis_benchmark_client
#!/bin/bash IS=128 TEXT="$(echo "The forms of printed letters should be beautiful, and \ that their arrangement \ on the page should be reasonable and a help to the \ shapeliness of the letters \ themselves. The forms of printed letters should be \ beautiful, and that their \ arrangement on the page should be reasonable and a \ help to the shapeliness of \ the letters themselves." | head -c ${IS})" INPUT="benchmark.txt" if [[ "${#}" != 1 ]]; then echo "Invalid number of arguments '${#}'" echo "Usage:" echo "\t$0 <batch size>" exit 1 fi BS="${1}" if [[ -f "${INPUT}" ]]; then rm ${INPUT} fi for i in $(seq ${BS}); do echo "${TEXT}." >> "${INPUT}" done ( for i in {1..1000}; do ./run_trtis_client.sh "${INPUT}" "${BS}" done ) | awk 'BEGIN{i=0} /Total Processing time:/{ t[$4 "_" i] = $4; total+=$4;} /Processed [0-9]* sequences for a total of/{ v=($8/$2); len+=v; ++i; } END { i = 0; asort(t); for (k in t) { times[i] = t[k]; ++i; } print("Performed " i " runs."); as = len/i; avg = total/i; l50 = times[int(i*0.50)]; l90 = times[int(i*0.90)]; l95 = times[int(i*0.95)]; l99 = times[int(i*0.99)]; mels = int(as * 86.6); rtf = as / avg; std =0; for (k in times) { v = times[k]; std += (v-avg)*(v-avg) } std *= 1.0/(i-1); std = sqrt(std); print("batch size = '${BS}'"); print("input size = '${IS}'"); print("avg latency (s) = " avg ); print("latency std (s) = " std ); print("latency interval 50% (s) = " l50); print("latency interval 90% (s) = " l90); print("latency interval 95% (s) = " l95); print("latency interval 99% (s) = " l99); print("average mels generated = " mels); print("average audio generated (s) = " as); print("average real-time factor = " rtf); }' rm "${INPUT}"
TensorFlow2/LanguageModeling/ELECTRA/data
data
BooksDownloader
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess


class BooksDownloader:
    def __init__(self, save_path):
        self.save_path = save_path

    def download(self):
        bookscorpus_download_command = 'python3 /workspace/bookcorpus/download_files.py --list /workspace/bookcorpus/url_list.jsonl --out'
        bookscorpus_download_command += ' ' + self.save_path + '/bookscorpus'
        bookscorpus_download_command += ' --trash-bad-count'
        bookscorpus_download_process = subprocess.run(bookscorpus_download_command, shell=True, check=True)
TensorFlow/LanguageModeling/BERT
BERT
run_re
# coding=utf-8 # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import csv import logging import os, sys import numpy as np import tensorflow as tf sys.path.append("/workspace/bert") import modeling import optimization import tokenization import time import horovod.tensorflow as hvd from utils.utils import LogEvalRunHook, LogTrainRunHook, setup_xla_flags from utils.gpu_affinity import set_affinity import utils.dllogger_class from dllogger import Verbosity flags = tf.flags FLAGS = flags.FLAGS ## Required parameters flags.DEFINE_string( "data_dir", None, "The input data dir. Should contain the .tsv files (or other data files) " "for the task.") flags.DEFINE_string( "bert_config_file", None, "The config json file corresponding to the pre-trained BERT model. " "This specifies the model architecture.") flags.DEFINE_string("task_name", None, "The name of the task to train.") flags.DEFINE_string("vocab_file", None, "The vocabulary file that the BERT model was trained on.") flags.DEFINE_string( "output_dir", None, "The output directory where the model checkpoints will be written.") ## Other parameters flags.DEFINE_string( "dllog_path", "/results/bert_dllog.json", "filename where dllogger writes to") flags.DEFINE_string( "init_checkpoint", None, "Initial checkpoint (usually from a pre-trained BERT model).") flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text. Should be True for uncased " "models and False for cased models.") flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization. " "Sequences longer than this will be truncated, and sequences shorter " "than this will be padded.") flags.DEFINE_bool("do_train", False, "Whether to run training.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_bool( "do_predict", False, "Whether to run the model in inference mode on the test set.") flags.DEFINE_integer("train_batch_size", 16, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.") flags.DEFINE_float("learning_rate", 5e-6, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. 
" "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_bool("horovod", False, "Whether to use Horovod for multi-gpu runs") flags.DEFINE_bool("amp", True, "Whether to enable AMP ops. When false, uses TF32 on A100 and FP32 on V100 GPUS.") flags.DEFINE_bool("use_xla", True, "Whether to enable XLA JIT compilation.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text_a, text_b=None, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ self.guid = guid self.text_a = text_a self.text_b = text_b self.label = label class PaddingInputExample(object): """Fake example so the num input examples is a multiple of the batch size. When running eval/predict on the TPU, we need to pad the number of examples to be a multiple of the batch size, because the TPU requires a fixed batch size. The alternative is to drop the last batch, which is bad because it means the entire output data won't be generated. We use this class instead of `None` because treating `None` as padding battches could cause silent errors. """ class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_id, is_real_example=True): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_id = label_id self.is_real_example = is_real_example class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for prediction.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_tsv(cls, input_file, quotechar=None): """Reads a tab separated value file.""" with tf.io.gfile.GFile(input_file, "r") as f: reader = csv.reader(f, delimiter="\t", quotechar=quotechar) lines = [] for line in reader: lines.append(line) return lines class BioBERTChemprotProcessor(DataProcessor): """Processor for the BioBERT data set obtained from (https://github.com/arwhirang/recursive_chemprot/tree/master/Demo/tree_LSTM/data). 
""" def get_train_examples(self, data_dir, file_name="trainingPosit_chem"): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, file_name)), "train") def get_dev_examples(self, data_dir, file_name="developPosit_chem"): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, file_name)), "dev") def get_test_examples(self, data_dir, file_name="testPosit_chem"): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, file_name)), "test") def get_labels(self): """See base class.""" return ["CPR:3", "CPR:4", "CPR:5", "CPR:6", "CPR:9", "False"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) if set_type == "test": text_a = tokenization.convert_to_unicode(line[1]) label = "False" else: text_a = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[2]) if label == "True": label = tokenization.convert_to_unicode(line[3]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class _ChemProtProcessor(DataProcessor): """Processor for the ChemProt data set.""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir, file_name="dev.tsv"): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, file_name)), "dev") def get_test_examples(self, data_dir, file_name="test.tsv"): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, file_name)), "test") def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): # skip header if i == 0: continue guid = line[0] text_a = tokenization.convert_to_unicode(line[1]) if set_type == "test": label = self.get_labels()[-1] else: try: label = tokenization.convert_to_unicode(line[2]) except IndexError: logging.exception(line) exit(1) examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) return examples class ChemProtProcessor(_ChemProtProcessor): def get_labels(self): """See base class.""" return ["CPR:3", "CPR:4", "CPR:5", "CPR:6", "CPR:9", "false"] class MedNLIProcessor(DataProcessor): def get_train_examples(self, data_dir): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") def get_dev_examples(self, data_dir, file_name="dev.tsv"): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, file_name)), "dev") def get_test_examples(self, data_dir, file_name="test.tsv"): """See base class.""" return self._create_examples( self._read_tsv(os.path.join(data_dir, file_name)), "test") def get_labels(self): """See base class.""" return ['contradiction', 'entailment', 'neutral'] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for (i, line) in enumerate(lines): if i == 0: continue guid = line[1] text_a = tokenization.convert_to_unicode(line[2]) text_b = tokenization.convert_to_unicode(line[3]) if set_type == "test": label = self.get_labels()[-1] else: label = tokenization.convert_to_unicode(line[0]) examples.append( InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples def 
convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): """Converts a single `InputExample` into a single `InputFeatures`.""" if isinstance(example, PaddingInputExample): return InputFeatures( input_ids=[0] * max_seq_length, input_mask=[0] * max_seq_length, segment_ids=[0] * max_seq_length, label_id=0, is_real_example=False) label_map = {} for (i, label) in enumerate(label_list): label_map[label] = i tokens_a = tokenizer.tokenize(example.text_a) tokens_b = None if example.text_b: tokens_b = tokenizer.tokenize(example.text_b) if tokens_b: # Modifies `tokens_a` and `tokens_b` in place so that the total # length is less than the specified length. # Account for [CLS], [SEP], [SEP] with "- 3" _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) else: # Account for [CLS] and [SEP] with "- 2" if len(tokens_a) > max_seq_length - 2: tokens_a = tokens_a[0:(max_seq_length - 2)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. tokens = [] segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) if tokens_b: for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length label_id = label_map[example.label] if ex_index < 5: tf.compat.v1.logging.info("*** Example ***") tf.compat.v1.logging.info("guid: %s" % (example.guid)) tf.compat.v1.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.compat.v1.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.compat.v1.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.compat.v1.logging.info("label: %s (id = %d)" % (example.label, label_id)) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id, is_real_example=True) return feature def file_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file): """Convert a set of `InputExample`s to a TFRecord file.""" writer = tf.python_io.TFRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.compat.v1.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature([feature.label_id]) features["is_real_example"] = create_int_feature( [int(feature.is_real_example)]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) writer.close() def file_based_input_fn_builder(input_file, batch_size, seq_length, is_training, drop_remainder, hvd=None): """Creates an `input_fn` closure to be passed to TPUEstimator.""" name_to_features = { "input_ids": tf.io.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.io.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.io.FixedLenFeature([], tf.int64), "is_real_example": tf.io.FixedLenFeature([], tf.int64), } def _decode_record(record, name_to_features): """Decodes a record to a TensorFlow example.""" example = tf.parse_single_example(record, name_to_features) # tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def input_fn(params): """The actual input function.""" #batch_size = params["batch_size"] # For training, we want a lot of parallel reading and shuffling. # For eval, we want no shuffling and parallel reading doesn't matter. 
d = tf.data.TFRecordDataset(input_file) if is_training: if hvd is not None: d = d.shard(hvd.size(), hvd.rank()) d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder)) return d return input_fn def _truncate_seq_pair(tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer sequence # one token at a time. This makes more sense than truncating an equal percent # of tokens from each, since if one sequence is very short then each token # that's truncated likely contains more information than a longer sequence. while True: total_length = len(tokens_a) + len(tokens_b) if total_length <= max_length: break if len(tokens_a) > len(tokens_b): tokens_a.pop() else: tokens_b.pop() def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): """Creates a classification model.""" model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings) # In the demo, we are doing a simple classification task on the entire # segment. # # If you want to use the token-level output, use model.get_sequence_output() # instead. output_layer = model.get_pooled_output() hidden_size = output_layer.shape[-1].value output_weights = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02)) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer()) with tf.variable_scope("loss"): if is_training: # I.e., 0.1 dropout output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) logits = tf.matmul(output_layer, output_weights, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) probabilities = tf.nn.softmax(logits, axis=-1) log_probs = tf.nn.log_softmax(logits, axis=-1) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_mean(per_example_loss) return (loss, per_example_loss, logits, probabilities) def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate=None, num_train_steps=None, num_warmup_steps=None, use_one_hot_embeddings=False, hvd=None, amp=False): """Returns `model_fn` closure for TPUEstimator.""" def model_fn(features, labels, mode, params): # pylint: disable=unused-argument """The `model_fn` for TPUEstimator.""" tf.compat.v1.logging.info("*** Features ***") for name in sorted(features.keys()): tf.compat.v1.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_real_example = None if "is_real_example" in features: is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32) else: is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32) is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits, probabilities) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() initialized_variable_names = {} scaffold_fn = None if init_checkpoint and (hvd is None or 
hvd.rank() == 0): (assignment_map, initialized_variable_names ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.compat.v1.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.compat.v1.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, hvd, False, amp) output_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, train_op=train_op) elif mode == tf.estimator.ModeKeys.EVAL: dummy_op = tf.no_op() # Need to call mixed precision graph rewrite if fp16 to enable graph rewrite if amp: loss_scaler = tf.train.experimental.FixedLossScale(1) dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite( optimization.LAMBOptimizer(learning_rate=0.0), loss_scaler) def metric_fn(per_example_loss, label_ids, logits, is_real_example): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) accuracy = tf.metrics.accuracy( labels=label_ids, predictions=predictions, weights=is_real_example) loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example) return { "eval_accuracy": accuracy, "eval_loss": loss, } eval_metric_ops = metric_fn(per_example_loss, label_ids, logits, is_real_example) output_spec = tf.estimator.EstimatorSpec( mode=mode, loss=total_loss, eval_metric_ops=eval_metric_ops) else: dummy_op = tf.no_op() # Need to call mixed precision graph rewrite if fp16 to enable graph rewrite if amp: dummy_op = tf.train.experimental.enable_mixed_precision_graph_rewrite( optimization.LAMBOptimizer(learning_rate=0.0)) output_spec = tf.estimator.EstimatorSpec( mode=mode, predictions={"probabilities": probabilities})#predicts)#probabilities) return output_spec return model_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. def input_fn_builder(features, seq_length, is_training, drop_remainder): """Creates an `input_fn` closure to be passed to TPUEstimator.""" all_input_ids = [] all_input_mask = [] all_segment_ids = [] all_label_ids = [] for feature in features: all_input_ids.append(feature.input_ids) all_input_mask.append(feature.input_mask) all_segment_ids.append(feature.segment_ids) all_label_ids.append(feature.label_id) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] num_examples = len(features) # This is for demo purposes and does NOT scale to large data sets. We do # not use Dataset.from_generator() because that uses tf.py_func which is # not TPU compatible. The right way to load data is with TFRecordReader. d = tf.data.Dataset.from_tensor_slices({ "input_ids": tf.constant( all_input_ids, shape=[num_examples, seq_length], dtype=tf.int32), "input_mask": tf.constant( all_input_mask, shape=[num_examples, seq_length], dtype=tf.int32), "segment_ids": tf.constant( all_segment_ids, shape=[num_examples, seq_length], dtype=tf.int32), "label_ids": tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32), }) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.batch(batch_size=batch_size, drop_remainder=drop_remainder) return d return input_fn # This function is not used by this file but is still used by the Colab and # people who depend on it. 
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer): """Convert a set of `InputExample`s to a list of `InputFeatures`.""" features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: tf.compat.v1.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) features.append(feature) return features def main(_): setup_xla_flags() tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO) dllogging = utils.dllogger_class.dllogger_class(FLAGS.dllog_path) if FLAGS.horovod: hvd.init() processors = { "chemprot": BioBERTChemprotProcessor, 'mednli': MedNLIProcessor, } tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint) if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict: raise ValueError( "At least one of `do_train`, `do_eval` or `do_predict' must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) tf.io.gfile.makedirs(FLAGS.output_dir) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 master_process = True training_hooks = [] global_batch_size = FLAGS.train_batch_size hvd_rank = 0 config = tf.compat.v1.ConfigProto() if FLAGS.horovod: global_batch_size = FLAGS.train_batch_size * hvd.size() master_process = (hvd.rank() == 0) hvd_rank = hvd.rank() config.gpu_options.visible_device_list = str(hvd.local_rank()) if hvd.size() > 1: training_hooks.append(hvd.BroadcastGlobalVariablesHook(0)) if FLAGS.use_xla: config.graph_options.optimizer_options.global_jit_level = tf.compat.v1.OptimizerOptions.ON_1 if FLAGS.amp: tf.enable_resource_variables() run_config = tf.estimator.RunConfig( model_dir=FLAGS.output_dir if master_process else None, session_config=config, save_checkpoints_steps=FLAGS.save_checkpoints_steps if master_process else None, keep_checkpoint_max=1) if master_process: tf.compat.v1.logging.info("***** Configuaration *****") for key in FLAGS.__flags.keys(): tf.compat.v1.logging.info(' {}: {}'.format(key, getattr(FLAGS, key))) tf.compat.v1.logging.info("**************************") train_examples = None num_train_steps = None num_warmup_steps = None training_hooks.append(LogTrainRunHook(global_batch_size, hvd_rank)) if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / global_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) start_index = 0 end_index = len(train_examples) tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record")] if FLAGS.horovod: tmp_filenames = [os.path.join(FLAGS.output_dir, "train.tf_record{}".format(i)) for i in range(hvd.size())] num_examples_per_rank = len(train_examples) // hvd.size() remainder = len(train_examples) % hvd.size() if hvd.rank() < remainder: start_index = hvd.rank() * (num_examples_per_rank+1) end_index = start_index + num_examples_per_rank + 1 
else: start_index = hvd.rank() * num_examples_per_rank + remainder end_index = start_index + (num_examples_per_rank) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list), init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate if not FLAGS.horovod else FLAGS.learning_rate * hvd.size(), num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_one_hot_embeddings=False, hvd=None if not FLAGS.horovod else hvd, amp=FLAGS.amp) estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config) if FLAGS.do_train: file_based_convert_examples_to_features( train_examples[start_index:end_index], label_list, FLAGS.max_seq_length, tokenizer, tmp_filenames[hvd_rank]) tf.compat.v1.logging.info("***** Running training *****") tf.compat.v1.logging.info(" Num examples = %d", len(train_examples)) tf.compat.v1.logging.info(" Batch size = %d", FLAGS.train_batch_size) tf.compat.v1.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=tmp_filenames, batch_size=FLAGS.train_batch_size, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True, hvd=None if not FLAGS.horovod else hvd) train_start_time = time.time() estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=training_hooks) train_time_elapsed = time.time() - train_start_time train_time_wo_overhead = training_hooks[-1].total_time avg_sentences_per_second = num_train_steps * global_batch_size * 1.0 / train_time_elapsed ss_sentences_per_second = (num_train_steps - training_hooks[-1].skipped) * global_batch_size * 1.0 / train_time_wo_overhead if master_process: tf.compat.v1.logging.info("-----------------------------") tf.compat.v1.logging.info("Total Training Time = %0.2f for Sentences = %d", train_time_elapsed, num_train_steps * global_batch_size) tf.compat.v1.logging.info("Total Training Time W/O Overhead = %0.2f for Sentences = %d", train_time_wo_overhead, (num_train_steps - training_hooks[-1].skipped) * global_batch_size) tf.compat.v1.logging.info("Throughput Average (sentences/sec) with overhead = %0.2f", avg_sentences_per_second) tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second) dllogging.logger.log(step=(), data={"throughput_train": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT) tf.compat.v1.logging.info("-----------------------------") if FLAGS.do_eval and master_process: eval_examples = processor.get_dev_examples(FLAGS.data_dir) num_actual_eval_examples = len(eval_examples) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") file_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) tf.compat.v1.logging.info("***** Running evaluation *****") tf.compat.v1.logging.info(" Num examples = %d (%d actual, %d padding)", len(eval_examples), num_actual_eval_examples, len(eval_examples) - num_actual_eval_examples) tf.compat.v1.logging.info(" Batch size = %d", FLAGS.eval_batch_size) # This tells the estimator to run through the entire set. 
eval_steps = None eval_drop_remainder = False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, batch_size=FLAGS.eval_batch_size, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with tf.io.gfile.GFile(output_eval_file, "w") as writer: tf.compat.v1.logging.info("***** Eval results *****") for key in sorted(result.keys()): tf.compat.v1.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if FLAGS.do_predict and master_process: predict_examples = processor.get_test_examples(FLAGS.data_dir) num_actual_predict_examples = len(predict_examples) predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record") file_based_convert_examples_to_features(predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file) tf.compat.v1.logging.info("***** Running prediction*****") tf.compat.v1.logging.info(" Num examples = %d (%d actual, %d padding)", len(predict_examples), num_actual_predict_examples, len(predict_examples) - num_actual_predict_examples) tf.compat.v1.logging.info(" Batch size = %d", FLAGS.predict_batch_size) predict_drop_remainder = False predict_input_fn = file_based_input_fn_builder( input_file=predict_file, batch_size=FLAGS.predict_batch_size, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=predict_drop_remainder) eval_hooks = [LogEvalRunHook(FLAGS.predict_batch_size)] eval_start_time = time.time() output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv") with tf.io.gfile.GFile(output_predict_file, "w") as writer: num_written_lines = 0 tf.compat.v1.logging.info("***** Predict results *****") for prediction in estimator.predict(input_fn=predict_input_fn, hooks=eval_hooks, yield_single_examples=True): probabilities = prediction["probabilities"] output_line = "\t".join( str(class_probability) for class_probability in probabilities) + "\n" writer.write(output_line) num_written_lines += 1 assert num_written_lines == num_actual_predict_examples eval_time_elapsed = time.time() - eval_start_time time_list = eval_hooks[-1].time_list time_list.sort() # Removing outliers (init/warmup) in throughput computation. 
eval_time_wo_overhead = sum(time_list[:int(len(time_list) * 0.99)]) num_sentences = (int(len(time_list) * 0.99)) * FLAGS.predict_batch_size avg = np.mean(time_list) cf_50 = max(time_list[:int(len(time_list) * 0.50)]) cf_90 = max(time_list[:int(len(time_list) * 0.90)]) cf_95 = max(time_list[:int(len(time_list) * 0.95)]) cf_99 = max(time_list[:int(len(time_list) * 0.99)]) cf_100 = max(time_list[:int(len(time_list) * 1)]) ss_sentences_per_second = num_sentences * 1.0 / eval_time_wo_overhead tf.compat.v1.logging.info("-----------------------------") tf.compat.v1.logging.info("Total Inference Time = %0.2f for Sentences = %d", eval_time_elapsed, eval_hooks[-1].count * FLAGS.predict_batch_size) tf.compat.v1.logging.info("Total Inference Time W/O Overhead = %0.2f for Sentences = %d", eval_time_wo_overhead, num_sentences) tf.compat.v1.logging.info("Summary Inference Statistics") tf.compat.v1.logging.info("Batch size = %d", FLAGS.predict_batch_size) tf.compat.v1.logging.info("Sequence Length = %d", FLAGS.max_seq_length) tf.compat.v1.logging.info("Precision = %s", "fp16" if FLAGS.amp else "fp32") tf.compat.v1.logging.info("Latency Confidence Level 50 (ms) = %0.2f", cf_50 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 90 (ms) = %0.2f", cf_90 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 95 (ms) = %0.2f", cf_95 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 99 (ms) = %0.2f", cf_99 * 1000) tf.compat.v1.logging.info("Latency Confidence Level 100 (ms) = %0.2f", cf_100 * 1000) tf.compat.v1.logging.info("Latency Average (ms) = %0.2f", avg * 1000) tf.compat.v1.logging.info("Throughput Average (sentences/sec) = %0.2f", ss_sentences_per_second) dllogging.logger.log(step=(), data={"throughput_val": ss_sentences_per_second}, verbosity=Verbosity.DEFAULT) tf.compat.v1.logging.info("-----------------------------") if __name__ == "__main__": flags.mark_flag_as_required("data_dir") flags.mark_flag_as_required("task_name") flags.mark_flag_as_required("vocab_file") flags.mark_flag_as_required("bert_config_file") flags.mark_flag_as_required("output_dir") tf.compat.v1.app.run()
TensorFlow2/LanguageModeling/BERT/data
data
GLUEDownloader
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import wget from pathlib import Path def mkdir(path): Path(path).mkdir(parents=True, exist_ok=True) class GLUEDownloader: def __init__(self, save_path): self.save_path = save_path + '/glue' def download(self, task_name): mkdir(self.save_path) if task_name in {'mrpc', 'mnli'}: task_name = task_name.upper() elif task_name == 'cola': task_name = 'CoLA' else: # SST-2 assert task_name == 'sst-2' task_name = 'SST' wget.download( 'https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py', out=self.save_path, ) sys.path.append(self.save_path) import download_glue_data download_glue_data.main( ['--data_dir', self.save_path, '--tasks', task_name]) sys.path.pop()
TensorFlow2/Detection/Efficientdet
Efficientdet
infer
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A simple example on how to use keras model for inference.""" import os from absl import app from absl import flags from absl import logging import numpy as np from PIL import Image import tensorflow as tf from utils import hparams_config from model import inference from model import efficientdet_keras flags.DEFINE_string('image_path', None, 'Location of test image.') flags.DEFINE_string('output_dir', None, 'Directory of annotated output images.') flags.DEFINE_string('model_dir', None, 'Location of the checkpoint to run.') flags.DEFINE_string('model_name', 'efficientdet-d0', 'Model name to use.') flags.DEFINE_string('hparams', '', 'Comma separated k=v pairs or a yaml file') flags.DEFINE_bool('debug', False, 'If true, run function in eager for debug.') flags.DEFINE_string('saved_model_dir', None, 'Saved model directory') FLAGS = flags.FLAGS def main(_): imgs = [np.array(Image.open(FLAGS.image_path))] * 2 # Create model config. config = hparams_config.get_efficientdet_config('efficientdet-d0') config.is_training_bn = False config.image_size = '1920x1280' config.nms_configs.score_thresh = 0.4 config.nms_configs.max_output_size = 100 config.override(FLAGS.hparams) # Use 'mixed_float16' if running on GPUs. policy = tf.keras.mixed_precision.experimental.Policy('float32') tf.keras.mixed_precision.experimental.set_policy(policy) tf.config.experimental_run_functions_eagerly(FLAGS.debug) # Create and run the model. model = efficientdet_keras.EfficientDetModel(config=config) model.build((None, None, None, 3)) model.load_weights(tf.train.latest_checkpoint(FLAGS.model_dir)) model.summary() class ExportModel(tf.Module): def __init__(self, model): super().__init__() self.model = model @tf.function def f(self, imgs): return self.model(imgs, training=False, post_mode='global') imgs = tf.convert_to_tensor(imgs, dtype=tf.uint8) export_model = ExportModel(model) if FLAGS.saved_model_dir: tf.saved_model.save( export_model, FLAGS.saved_model_dir, signatures=export_model.f.get_concrete_function( tf.TensorSpec(shape=(None, None, None, 3), dtype=tf.uint8))) export_model = tf.saved_model.load(FLAGS.saved_model_dir) boxes, scores, classes, valid_len = export_model.f(imgs) # Visualize results. 
for i, img in enumerate(imgs): length = valid_len[i] img = inference.visualize_image( img, boxes[i].numpy()[:length], classes[i].numpy().astype(np.int)[:length], scores[i].numpy()[:length], label_map=config.label_map, min_score_thresh=config.nms_configs.score_thresh, max_boxes_to_draw=config.nms_configs.max_output_size) output_image_path = os.path.join(FLAGS.output_dir, str(i) + '.jpg') Image.fromarray(img).save(output_image_path) print('writing annotated image to %s' % output_image_path) if __name__ == '__main__': flags.mark_flag_as_required('image_path') flags.mark_flag_as_required('output_dir') flags.mark_flag_as_required('model_dir') logging.set_verbosity(logging.ERROR) app.run(main)
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trtis
trtis
CustomContext
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "denoiserLoader.h" #include "engineCache.h" #include "tacotron2Loader.h" #include "utils.h" #include "waveGlowLoader.h" #include "CustomContext.hpp" #include "CharacterMappingReader.hpp" #include "logging.h" #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Weffc++" #pragma GCC diagnostic ignored "-Wunused-parameter" #endif #include "src/core/model_config.h" #ifdef __GNUC__ #pragma GCC diagnostic pop #endif #include "NvInfer.h" #include <iostream> using namespace nvinfer1; using namespace tts; using ModelConfig = nvidia::inferenceserver::ModelConfig; using ModelParameter = nvidia::inferenceserver::ModelParameter; /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const char* const ENGINE_EXT = ".eng"; } /****************************************************************************** * HELPER FUNCTIONS *********************************************************** *****************************************************************************/ namespace { std::vector<std::string> generateErrorMessages() { std::vector<std::string> msgs(CustomContext::NUM_ERR_CODES); msgs[CustomContext::SUCCESS] = "success"; msgs[CustomContext::BAD_INPUT] = "bad_input"; msgs[CustomContext::BAD_TENSOR_SIZE] = "bad_tensor_size"; return msgs; } } /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ int CustomContext::create( const CustomInitializeData* const data, CustomContext** const customContext) { try { CustomContext* context = new CustomContext(data); *customContext = context; } catch (const std::exception& e) { std::cerr << "Failed to create CustomContext: " << e.what() << std::endl; return 
ErrorCode::ERROR; } return ErrorCode::SUCCESS; } /****************************************************************************** * CONSTRUCTORS / DESTRUCTOR ************************************************** *****************************************************************************/ CustomContext::CustomContext(const CustomInitializeData* const data) : TimedObject("CustomContext::execute()"), m_name(), m_logger(new Logger), m_synthesizer(nullptr), m_errMessages(generateErrorMessages()), m_inputLength(), m_outputLength(), m_inputHost(), m_outputHost(), m_reader(CharacterMapping::defaultMapping()), m_writer() { ModelConfig modelConfig; if (!modelConfig.ParseFromString(std::string(data->serialized_model_config, data->serialized_model_config_size))) { throw std::runtime_error("Failed to parse model config."); } m_name = data->instance_name; const std::string enginePath = modelConfig.parameters().at("engine_path").string_value(); try { const std::string characterMappingPath = modelConfig.parameters().at("mapping_path").string_value(); m_reader.setCharacterMapping( CharacterMappingReader::loadFromFile(characterMappingPath)); std::cout << "Loaded mapping from '" << characterMappingPath << "'." << std::endl; } catch (const std::exception& e) { std::cerr << "Failed to load character mapping due to: " << e.what() << std::endl; std::cerr << "Using default mapping." << std::endl; } // set cuda device cudaError_t err = cudaSetDevice(data->gpu_device_id); if (err != cudaSuccess) { throw std::runtime_error("Failed to set device to: " + std::to_string(data->gpu_device_id)); } TRTPtr<IBuilder> builder; { std::lock_guard<std::mutex> lock(m_mutex); builder.reset(createInferBuilder(*m_logger)); } EngineCache cache(m_logger); std::shared_ptr<Tacotron2Instance> tacotron2 = Tacotron2Loader::load( cache, *builder, enginePath + "/" + Tacotron2Instance::ENGINE_NAME + ENGINE_EXT, 400, false, modelConfig.max_batch_size()); std::shared_ptr<WaveGlowInstance> waveglow = WaveGlowLoader::load( cache, *builder, m_logger, enginePath + "/" + WaveGlowInstance::ENGINE_NAME + ENGINE_EXT, true, modelConfig.max_batch_size()); std::shared_ptr<DenoiserInstance> denoiser(nullptr); if (Utils::parseBool( modelConfig.parameters().at("use_denoiser").string_value())) { try { denoiser = DenoiserLoader::load( cache, *builder, enginePath + "/denoiser.eng", true, modelConfig.max_batch_size()); } catch (const std::exception& e) { std::cerr << "WARNING: Failed to load denoiser: " << e.what() << std::endl; } } m_synthesizer.reset(new SpeechSynthesizer(tacotron2, waveglow, denoiser)); m_inputLength.resize(m_synthesizer->getMaxBatchSize()); m_outputLength.resize(m_synthesizer->getMaxBatchSize()); m_inputHost.resize( m_synthesizer->getMaxBatchSize() * m_synthesizer->getMaxInputSize()); m_outputHost.resize( m_synthesizer->getMaxBatchSize() * m_synthesizer->getMaxOutputSize()); // mark children for timing output addChild(m_synthesizer.get()); addChild(&m_reader); addChild(&m_writer); } /****************************************************************************** * PUBLIC METHODS ************************************************************* *****************************************************************************/ int CustomContext::execute( const int numPayloads, CustomPayload* const payloads, CustomGetNextInputFn_t inputFn, CustomGetOutputFn_t outputFn) { int rv = ErrorCode::SUCCESS; try { resetTiming(); startTiming(); int64_t numSamples = 0; for (int payloadIndex = 0; payloadIndex < numPayloads; ++payloadIndex) { const 
custom_payload_struct * payload = payloads+payloadIndex; if (payload->input_cnt != 1) { throw std::runtime_error( "Encountered input count of " + std::to_string(payload->input_cnt) + " for payload " + std::to_string(payloadIndex)); } // want input that is just 1 dimension sequence if (payload->input_shape_dim_cnts[0] != 1) { throw std::runtime_error( "Encountered input with " + std::to_string(payload->input_shape_dim_cnts[0]) + " dimensions (only accepts 1)."); } const int batchSize = payloads[payloadIndex].batch_size; // copy input to device int32_t inputSpacing; m_reader.read( payload->input_context, inputFn, m_inputHost.size(), batchSize, m_inputHost.data(), m_inputLength.data(), &inputSpacing); m_synthesizer->inferFromHost( batchSize, m_inputHost.data(), inputSpacing, m_inputLength.data(), m_outputHost.data(), m_outputLength.data()); // compute total audio time for (int batchIndex = 0; batchIndex < batchSize; ++batchIndex) { numSamples += m_outputLength[batchIndex]; } m_writer.write( payload->output_context, outputFn, batchSize, m_synthesizer->getMaxOutputSize(), m_outputHost.data(), m_outputLength.data()); } stopTiming(); const float totalAudioLength = static_cast<float>(numSamples) / 22050.0f; std::cout << "Generated " << totalAudioLength << " seconds of 22Khz audio." << std::endl; std::cout << "GPU Inference Time:" << std::endl; printTiming(std::cout, 1); } catch (const std::exception& e) { std::cerr << "Exception in CustomContext::execute(): " << e.what() << std::endl; resetTiming(); rv = ErrorCode::ERROR; } return rv; } const char * CustomContext::errorToString( const int error) const { return m_errMessages[error].c_str(); } std::mutex CustomContext::m_mutex;
PyTorch/Segmentation/MaskRCNN/pytorch
pytorch
TROUBLESHOOTING
# Troubleshooting Here is a compilation of common issues that you might face while compiling / running this code: ## Compilation errors when compiling the library If you encounter build errors like the following: ``` /usr/include/c++/6/type_traits:1558:8: note: provided for ‘template<class _From, class _To> struct std::is_convertible’ struct is_convertible ^~~~~~~~~~~~~~ /usr/include/c++/6/tuple:502:1: error: body of constexpr function ‘static constexpr bool std::_TC<<anonymous>, _Elements>::_NonNestedTuple() [with _SrcTuple = std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>&&; bool <anonymous> = true; _Elements = {at::Tensor, at::Tensor, at::Tensor, at::Tensor}]’ not a return-statement } ^ error: command '/usr/local/cuda/bin/nvcc' failed with exit status 1 ``` check your CUDA version and your `gcc` version. ``` nvcc --version gcc --version ``` If you are using CUDA 9.0 and gcc 6.4.0, then refer to https://github.com/facebookresearch/maskrcnn-benchmark/issues/25, which has a summary of the solution. Basically, CUDA 9.0 is not compatible with gcc 6.4.0. ## ImportError: No module named maskrcnn_benchmark.config when running webcam.py This means that `maskrcnn-benchmark` has not been properly installed. Refer to https://github.com/facebookresearch/maskrcnn-benchmark/issues/22 for a few possible issues. Note that we now support Python 2 as well. ## ImportError: Undefined symbol: __cudaPopCallConfiguration error when importing _C This probably means that the version of NVCC used to compile the extension is inconsistent with your conda CUDAToolKit package. This was first mentioned in https://github.com/facebookresearch/maskrcnn-benchmark/issues/45 . All you need to do is: ``` # Check the NVCC compile version(e.g.) /usr/cuda-9.2/bin/nvcc --version # Check the CUDAToolKit version(e.g.) ~/anaconda3/bin/conda list | grep cuda # If you need to update your CUDAToolKit ~/anaconda3/bin/conda install -c anaconda cudatoolkit==9.2 ``` Both of them should have the **same** version. For example, NVCC==9.2 with CUDAToolKit==9.2 is fine, while NVCC==9.2 with CUDAToolKit==9.0 fails. ## Segmentation fault (core dumped) when running the library This probably means that you have compiled the library using GCC < 4.9, which is ABI-incompatible with PyTorch. Indeed, during installation, you probably saw a message like ``` Your compiler (g++ 4.8) may be ABI-incompatible with PyTorch! Please use a compiler that is ABI-compatible with GCC 4.9 and above. See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html. See https://gist.github.com/goldsborough/d466f43e8ffc948ff92de7486c5216d6 for instructions on how to install GCC 4.9 or higher. ``` Follow the instructions on https://gist.github.com/goldsborough/d466f43e8ffc948ff92de7486c5216d6 to install GCC 4.9 or higher, and try recompiling `maskrcnn-benchmark` again, after cleaning the `build` folder with ``` rm -rf build ```
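As an additional quick sanity check (a minimal sketch, assuming it is run inside the environment where the extension was built), you can verify from Python which CUDA version PyTorch was built against and whether the compiled `_C` extension imports cleanly:
```
# Illustrative sanity check; assumes maskrcnn-benchmark is installed in the current environment.
import torch

print(torch.__version__)   # PyTorch version
print(torch.version.cuda)  # CUDA version PyTorch was built against

# If the next line raises ImportError, the C/CUDA extension was not built correctly
# (see the version-mismatch and ABI sections above).
from maskrcnn_benchmark import _C  # noqa: F401
```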
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2LSTMCellPlugin
taco2LSTMCellPlugin
CMakeLists
# # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # file(GLOB SRCS *.cpp *.cu) set(PLUGIN_SOURCES ${PLUGIN_SOURCES} ${SRCS}) set(PLUGIN_SOURCES ${PLUGIN_SOURCES} PARENT_SCOPE)
PyTorch/Classification/GPUNet/triton/125ms-D/runner
runner
config_NVIDIA-DGX-1-(1x-V100-32GB)
batching: dynamic checkpoints: - name: 1.25ms-D url: https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_d1_pyt_ckpt/versions/21.12.0_amp/zip configurations: - checkpoint: 1.25ms-D parameters: backend_accelerator: trt checkpoint: 1.25ms-D device_kind: gpu export_format: onnx export_precision: fp16 format: onnx max_batch_size: 64 number_of_model_instances: 2 precision: fp16 tensorrt_capture_cuda_graph: 0 torch_jit: none container_version: '21.12' datasets: - name: imagenet datasets_dir: datasets ensemble_model_name: null framework: PyTorch measurement_steps_offline: 8 measurement_steps_online: 32 model_name: GPUnet performance_tool: model_analyzer triton_container_image: nvcr.io/nvidia/tritonserver:21.12-py3 triton_custom_operations: null triton_dockerfile: null triton_load_model_method: explicit
PaddlePaddle/LanguageModeling/BERT/data
data
SquadDownloader
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import urllib.request class SquadDownloader: def __init__(self, save_path): self.save_path = save_path + '/squad' if not os.path.exists(self.save_path): os.makedirs(self.save_path) if not os.path.exists(self.save_path + '/v1.1'): os.makedirs(self.save_path + '/v1.1') if not os.path.exists(self.save_path + '/v2.0'): os.makedirs(self.save_path + '/v2.0') self.download_urls = { 'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json': 'v1.1/train-v1.1.json', 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json': 'v1.1/dev-v1.1.json', 'https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/': 'v1.1/evaluate-v1.1.py', 'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json': 'v2.0/train-v2.0.json', 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json': 'v2.0/dev-v2.0.json', 'https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/': 'v2.0/evaluate-v2.0.py', } def download(self): for item in self.download_urls: url = item file = self.download_urls[item] print('Downloading:', url) if os.path.isfile(self.save_path + '/' + file): print('** Download file already exists, skipping download') else: response = urllib.request.urlopen(url) with open(self.save_path + '/' + file, "wb") as handle: handle.write(response.read())
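A minimal standalone usage sketch for the class above (illustrative only; the download directory below is an arbitrary example, and in the repository the class is normally driven by the surrounding data-preparation tooling):
```
# Illustrative usage sketch; 'downloads' is an example output directory, not a required path.
downloader = SquadDownloader('downloads')
downloader.download()  # fetches SQuAD v1.1 and v2.0 JSON files plus their evaluation scripts
```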
PyTorch/Detection/Efficientdet/effdet/csrc/nms/cpu
cpu
vision
// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <torch/extension.h> at::Tensor nms_cpu(const at::Tensor& dets, const at::Tensor& scores, const float threshold);
PyTorch/Translation/Transformer/fairseq/modules
modules
__init__
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from .beamable_mm import BeamableMM from .learned_positional_embedding import LearnedPositionalEmbedding from .multihead_attention import MultiheadAttention from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding __all__ = [ 'BeamableMM', 'LearnedPositionalEmbedding', 'MultiheadAttention', 'SinusoidalPositionalEmbedding', ]
PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2/unidecoder
unidecoder
homoglyphs
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # The MIT License (MIT) # # Copyright (c) 2015 Rob Dawson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # Based on: # https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt # homoglyphs = { ' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'], '!': ['ǃ', 'ⵑ', '!'], '$': ['$'], '%': ['%'], '&': ['ꝸ', '&'], "'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'], '"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'], '(': ['❨', '❲', '〔', '﴾', '(', '['], ')': ['❩', '❳', '〕', '﴿', ')', ']'], '*': ['٭', '⁎', '∗', '*', '𐌟'], '+': ['᛭', '➕', '+', '𐊛'], ',': ['¸', '؍', '٫', '‚', 'ꓹ', ','], '-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'], '.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'], '/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'], '2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'], '3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'], '4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'], '5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'], '6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'], '7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'], '8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'], '9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'], ':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'], ';': [';', ';'], '<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'], '=': ['᐀', '⹀', '゠', '꓿', '='], '>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'], '?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 
'ꛫ', '?'], '@': ['@'], 'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'], 'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'], 'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'], 'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'], 'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'], 'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'], 'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'], 'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'], 'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'], 'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'], 'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'], 'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'], 'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'], 'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'], 'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'], 'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'], 'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'], 'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'], 'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'], 'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'], 'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'], 'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'], 'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'], 'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'], '\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'], '^': ['˄', 'ˆ'], '_': ['ߺ', '﹍', '﹎', '﹏', '_'], 'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', 
'𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'], 'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'], 'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'], 'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'], 'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'], 'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'], 'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'], 'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'], 'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'], 'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'], 'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'], 'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'], 'm': ['m'], 'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'], 'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'], 'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'], 'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'], 'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'], 's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'], 't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'], 'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'], 'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'], 'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'], 'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'], 'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'], 'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', 
'𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'], '{': ['❴', '{', '𝄔'], '}': ['❵', '}'], '~': ['˜', '῀', '⁓', '∼'], }
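The table above maps each ASCII character to visually confusable code points that may stand in for it. A minimal sketch of how such a table could be inverted and applied to normalize text (the helper below is hypothetical and not part of this module):
```
# Illustrative sketch: invert the homoglyphs table defined above and map confusables back to ASCII.
homoglyph_to_ascii = {
    glyph: ascii_char
    for ascii_char, glyphs in homoglyphs.items()
    for glyph in glyphs
}

def replace_homoglyphs(text):  # hypothetical helper
    return ''.join(homoglyph_to_ascii.get(ch, ch) for ch in text)
```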
TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact
tensorflow-dot-based-interact
build_pip_pkg
#!/usr/bin/env bash # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e DEST=$(readlink -f "artifacts") mkdir -p "${DEST}" TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX) cp setup.py "${TMPDIR}" cp MANIFEST.in "${TMPDIR}" cp LICENSE "${TMPDIR}" rsync -avm -L --exclude='*_test.py' --exclude='*/cc/*' --exclude='*/__pycache__/*' ${PIP_FILE_PREFIX}tensorflow_dot_based_interact "${TMPDIR}" pushd ${TMPDIR} python3 setup.py bdist_wheel > /dev/null cp dist/*.whl "${DEST}" popd rm -rf ${TMPDIR}
PyTorch/SpeechSynthesis/HiFiGAN/scripts
scripts
generate_filelists
#!/usr/bin/env bash set -e : ${DATASET_PATH:=data/LJSpeech-1.1} # Generate filelists python common/split_lj.py --metadata-path "${DATASET_PATH}/metadata.csv" --subsets train val test all python common/split_lj.py --metadata-path "${DATASET_PATH}/metadata.csv" --add-transcript --subsets all # used to extract ground-truth mels or pitch python common/split_lj.py --metadata-path "${DATASET_PATH}/metadata.csv" --add-pitch --add-transcript --subsets all # used during extracting fastpitch mels
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils
utils
utils
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time import logging import importlib from pathlib import PosixPath from typing import Optional, Union import cudf import cupy import dask.dataframe as dd import dask_cudf import cupy as cp import numpy as np import pandas as pd import os from syngen.utils.types import DataFrameType, NDArray logger = logging.getLogger(__name__) log = logger class CustomTimer: """Wraps `time` module and adds tagging for multiple timers Example: timer = CustomTimer() timer.start_counter("tag") # - do a series of operation # ... # - end of operations timer.end_counter("tag", "tag timer has ended") Args: path (Optional[str]) """ def __init__(self, path: Optional[Union[PosixPath, str]] = str, verbose: bool = False): self.path = path self.verbose = verbose self.timers = {} self.f = None if self.path: self.f = open(self.path, "w") def start_counter(self, key: str): self.timers[key] = time.perf_counter() def end_counter(self, key: str, msg: str): end = time.perf_counter() start = self.timers.get(key, None) if start is None: return message_string = f"{msg}: {end - start:.2f}\n" if self.f: self.f.write(message_string) if self.verbose: print(message_string, end='') def maybe_close(self): if self.f: self.f.close() def current_ms_time(): return round(time.time() * 1000) def to_ndarray(df: DataFrameType) -> NDArray: """ Returns potentially distributed data frame to its in-memory equivalent array. """ if isinstance(df, (cudf.DataFrame, pd.DataFrame)): return df.values elif isinstance(df, (dask_cudf.DataFrame, dd.DataFrame)): return df.compute().values else: raise NotImplementedError(f'Conversion of type {type(df)} is not supported') def df_to_pandas(df): """ Converts `DataFrameType` to `pandas.DataFrame` Args: df (DataFrameType): the DataFrame to be converted """ if isinstance(df, cudf.DataFrame): pddf = df.to_pandas() elif isinstance(df, dask_cudf.DataFrame): pddf = pd.DataFrame( cupy.asnumpy(df.values.compute()), columns=df.columns ) elif isinstance(df, pd.DataFrame): pddf = df else: raise ValueError(f"DataFrame type {type(df)} not supported") return pddf def df_to_cudf(df: DataFrameType): """ Converts `DataFrameType` to `cudf.DataFrame` Args: df (DataFrameType): the DataFrame to be converted """ if isinstance(df, cudf.DataFrame): pass elif isinstance(df, dask_cudf.DataFrame): df = cudf.DataFrame( cupy.asnumpy(df.values.compute()), columns=df.columns ) elif isinstance(df, pd.DataFrame): df = cudf.from_pandas(df) else: raise ValueError(f"DataFrameType type {type(df)} not supported") return df def df_to_dask_cudf(df: DataFrameType, chunksize: Optional[int] = None): """ Converts `DataFrameType` to `dask_cudf.DataFrame` Args: df (DataFrameType): the DataFrame to be converted chunksize (int): dask chunk size. 
(default: min(1e6, len(df) // num_devices)) """ if chunksize is None: chunksize = min( int(1e6), len(df) // cupy.cuda.runtime.getDeviceCount() ) if isinstance(df, cudf.DataFrame): df = dask_cudf.from_cudf(df, chunksize=chunksize) elif isinstance(df, dask_cudf.DataFrame): pass elif isinstance(df, pd.DataFrame): df = cudf.from_pandas(df) df = dask_cudf.from_cudf(df, chunksize=chunksize) else: raise ValueError(f"DataFrameType type {type(df)} not supported") return df def dynamic_import(object_path): """Import an object from its full path.""" if isinstance(object_path, str): parent, obj_name = object_path.rsplit(".", 1) try: parent = importlib.import_module(parent) except ImportError: raise ImportError(f"Could not import {object_path}") return getattr(parent, obj_name) return object_path def get_object_path(obj): return obj.__class__.__module__ + '.' + obj.__class__.__name__ def ensure_path(path: Union[str, PosixPath]): if not os.path.exists(os.path.dirname(path)): os.makedirs(os.path.dirname(path)) return path def infer_operator(ndarray: NDArray): """ Returns array backend module (numpy or cupy). """ if isinstance(ndarray, np.ndarray): return np elif isinstance(ndarray, cp.ndarray): return cp else: logger.warning( 'Detected array of type %s, while one of (%s) was expected. Defaulting to using numpy', type(ndarray), 'numpy.ndarray, cupy.ndarray', ) return np
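A minimal usage sketch for the helpers above (illustrative only; the timer tag and output file name are arbitrary examples):
```
# Illustrative usage of CustomTimer and the conversion helpers defined in this module.
import pandas as pd

timer = CustomTimer(path="timings.txt", verbose=True)  # example output file
timer.start_counter("convert")

df = pd.DataFrame({"a": [1, 2, 3]})
arr = to_ndarray(df)      # in-memory numpy array for a pandas DataFrame
xp = infer_operator(arr)  # numpy for np.ndarray, cupy for cp.ndarray

timer.end_counter("convert", "conversion took")
timer.maybe_close()
```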
TensorFlow2/Segmentation/Contrib/UNet3P/data_preparation
data_preparation
delete_extracted_scans_data
rm -r 'data/Training Batch 1/' rm -r 'data/Training Batch 2/'
PyTorch/SpeechSynthesis/FastPitch/triton/scripts
scripts
setup_parameters
#!/usr/bin/env bash # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. export PRECISION="fp16" export FORMAT="ts-trace" export BATCH_SIZE="1,2,4,8" export BACKEND_ACCELERATOR="none" export MAX_BATCH_SIZE="8" export NUMBER_OF_MODEL_INSTANCES="2" export TRITON_MAX_QUEUE_DELAY="1" export TRITON_PREFERRED_BATCH_SIZES="4 8" export SEQUENCE_LENGTH="128" export CONFIG_FORMAT="torchscript"
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit
deployment_toolkit
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
PyTorch/Segmentation/nnUNet
nnUNet
README
# nnU-Net For PyTorch This repository provides a script and recipe to train the nnU-Net model to achieve state-of-the-art accuracy and is tested and maintained by NVIDIA. ## Table Of Contents - [Model overview](#model-overview) * [Model architecture](#model-architecture) * [Default configuration](#default-configuration) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [TF32](#tf32) * [Glossary](#glossary) - [Setup](#setup) * [Requirements](#requirements) - [Quick Start Guide](#quick-start-guide) - [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Command-line options](#command-line-options) * [Getting the data](#getting-the-data) * [Dataset guidelines](#dataset-guidelines) * [Multi-dataset](#multi-dataset) * [Training process](#training-process) * [Inference process](#inference-process) - [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Training accuracy: NVIDIA DGX A100 (8x A100 80G)](#training-accuracy-nvidia-dgx-a100-8x-a100-80g) * [Training accuracy: NVIDIA DGX-1 (8x V100 32G)](#training-accuracy-nvidia-dgx-1-8x-v100-32g) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX A100 (8x A100 80G)](#training-performance-nvidia-dgx-a100-8x-a100-80g) * [Training performance: NVIDIA DGX-1 (8x V100 32G)](#training-performance-nvidia-dgx-1-8x-v100-32g) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA DGX A100 (1x A100 80G)](#inference-performance-nvidia-dgx-a100-1x-a100-80g) * [Inference performance: NVIDIA DGX-1 (1x V100 32G)](#inference-performance-nvidia-dgx-1-1x-v100-32g) - [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview The nnU-Net ("no-new-Net") refers to a robust and self-adapting framework for U-Net based medical image segmentation. This repository contains a nnU-Net implementation as described in the paper: [nnU-Net: Self-adapting Framework for U-Net-Based Medical Image Segmentation](https://arxiv.org/abs/1809.10486). The differences between this nnU-net and [original model](https://github.com/MIC-DKFZ/nnUNet) are: - Dynamic selection of patch size is not supported, and it has to be set in `data_preprocessing/configs.py` file. - Cascaded U-Net is not supported. - The following data augmentations are not used: rotation, simulation of low resolution, gamma augmentation. This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. We developed the model using [PyTorch Lightning](https://www.pytorchlightning.ai), a new easy-to-use framework that ensures code readability and reproducibility without the boilerplate. ### Model architecture The nnU-Net allows the training of two types of networks: 2D U-Net and 3D U-Net to perform semantic segmentation of 3D images, with high accuracy and performance. 
The following figure shows the architecture of the 3D U-Net model and its different components. U-Net is composed of a contractive and an expanding path, that aims at building a bottleneck in its centremost part through a combination of convolution, instance norm, and leaky ReLU operations. After this bottleneck, the image is reconstructed through a combination of convolutions and upsampling. Skip connections are added with the goal of helping the backward flow of gradients to improve the training. <img src="images/unet3d.png" width="900"/> *Figure 1: The 3D U-Net architecture* ### Default configuration All convolution blocks in U-Net in both encoder and decoder are using two convolution layers followed by instance normalization and a leaky ReLU nonlinearity. For downsampling, we are using stride convolution whereas transposed convolution is used for upsampling. All models were trained with an Adam optimizer. For loss function we use the average of [cross-entropy](https://en.wikipedia.org/wiki/Cross_entropy) and [dice coefficient](https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient). Early stopping is triggered if the validation dice score wasn't improved during the last 100 epochs. Used data augmentation: crop with oversampling the foreground class, mirroring, zoom, Gaussian noise, Gaussian blur, brightness, and contrast. ### Feature support matrix The following features are supported by this model: | Feature | nnUNet |-----------------------|-------------------------- |[DALI](https://docs.nvidia.com/deeplearning/dali/release-notes/index.html) | Yes |Automatic mixed precision (AMP) | Yes |Distributed data-parallel (DDP) | Yes #### Features **DALI** NVIDIA DALI - DALI is a library-accelerating data preparation pipeline. To speed up your input pipeline, you only need to define your data loader with the DALI library. For details, see example sources in this repository or see the [DALI documentation](https://docs.nvidia.com/deeplearning/dali/index.html) **Automatic Mixed Precision (AMP)** This implementation uses native PyTorch AMP implementation of mixed precision training. It allows us to use FP16 training with FP32 master weights by modifying a few lines of code. **DistributedDataParallel (DDP)** The model uses PyTorch Lightning implementation of distributed data parallelism at the module level which can run across multiple machines. ### Mixed precision training Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to keep as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x speedup on the most intense model architectures. Using mixed precision training requires two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in [CUDA 8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep Learning SDK. 
For information about: * How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation. * Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. * APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/). #### Enabling mixed precision For training and inference, mixed precision can be enabled by adding the `--amp` flag. Mixed precision is using [native PyTorch implementation](https://pytorch.org/blog/accelerating-training-on-nvidia-gpus-with-pytorch-automatic-mixed-precision/). #### TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require a high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ### Glossary **Test time augmentation** Test time augmentation is an inference technique that averages predictions from augmented images with its prediction. As a result, predictions are more accurate, but with the cost of a slower inference process. For nnU-Net, we use all possible flip combinations for image augmenting. Test time augmentation can be enabled by adding the `--tta` flag. ## Setup The following section lists the requirements that you need to meet to start training the nnU-Net model. ### Requirements This repository contains Dockerfile which extends the PyTorch NGC container and encapsulates some dependencies. 
Aside from these dependencies, ensure you have the following components: - [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) - PyTorch 22.11 NGC container - Supported GPUs: - [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) - [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/) - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: - [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) - [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry) - Running [PyTorch](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/running.html#running) For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). ## Quick Start Guide To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the nnUNet model on the [Medical Segmentation Decathlon](http://medicaldecathlon.com/) dataset. For the specifics on training and inference, see the [Advanced](#advanced) section. 1. Clone the repository. Executing this command will create your local repository with all the code to run nnU-Net. ``` git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/PyTorch/Segmentation/nnUNet ``` 2. Build the nnU-Net PyTorch NGC container. This command will use the Dockerfile to create a Docker image named `nnunet`, downloading all the required components automatically. ``` docker build -t nnunet . ``` The NGC container contains all the components optimized for usage on NVIDIA hardware. 3. Start an interactive session in the NGC container to run preprocessing/training/inference. The following command will launch the container and mount the `./data` directory as a volume to the `/data` directory inside the container, and `./results` directory to the `/results` directory in the container. ``` mkdir data results docker run -it --privileged --runtime=nvidia --shm-size=8g --ulimit memlock=-1 --ulimit stack=67108864 --rm -v ${PWD}/data:/data -v ${PWD}/results:/results nnunet:latest /bin/bash ``` 4. Prepare the BraTS dataset. To download and preprocess the data run: ``` python download.py --task 01 python preprocess.py --task 01 --dim 3 python preprocess.py --task 01 --dim 2 ``` Then `ls /data` should print: ``` 01_3d 01_2d Task01_BrainTumour ``` For the specifics on data preprocessing, see the [Getting the data](#getting-the-data) section. 5. Start training. Training can be started with: ``` python scripts/train.py --gpus <gpus> --fold <fold> --dim <dim> [--amp] [--bind] ``` To see descriptions of the train script arguments run `python scripts/train.py --help`. You can customize the training process. For details, see the [Training process](#training-process) section. 6. Start benchmarking. 
The training and inference performance can be evaluated by using benchmarking scripts, such as: ``` python scripts/benchmark.py --mode {train,predict} --gpus <ngpus> --dim {2,3} --batch_size <bsize> [--amp] [--bind] ``` To see descriptions of the benchmark script arguments, run `python scripts/benchmark.py --help`. 7. Start inference/predictions. Inference can be started with: ``` python scripts/inference.py --data <path/to/data> --dim <dim> --fold <fold> --ckpt_path <path/to/checkpoint> [--amp] [--tta] [--save_preds] ``` Note: You have to prepare either the validation or the test dataset to run this script by running `python preprocess.py --task 01 --dim {2,3} --exec_mode {val,test}`. After preprocessing, a `val` or `test` directory with preprocessed data ready for inference will be created inside the given task directory (e.g. `/data/01_3d/` for task 01 and dim 3). Possible workflow: ``` python preprocess.py --task 01 --dim 3 --exec_mode val python scripts/inference.py --data /data/01_3d/val --dim 3 --fold 0 --ckpt_path <path/to/checkpoint> --amp --tta --save_preds ``` Then, if you have labels for the predicted images, you can evaluate them with the `evaluate.py` script. For example: ``` python evaluate.py --preds /results/preds_task_01_dim_3_fold_0_tta --lbls /data/Task01_BrainTumour/labelsTr ``` To see descriptions of the inference script arguments, run `python scripts/inference.py --help`. You can customize the inference process. For details, see the [Inference process](#inference-process) section. Now that you have your model trained and evaluated, you can choose to compare your training results with our [Training accuracy results](#training-accuracy-results). You can also choose to benchmark your performance against the [Training performance benchmark](#training-performance-results) or the [Inference performance benchmark](#inference-performance-results). Following the steps in these sections will ensure that you achieve the same accuracy and performance results as stated in the [Results](#results) section. ## Advanced The following sections provide greater details of the dataset, running training and inference, and the training results. ### Scripts and sample code In the root directory, the most important files are: * `main.py`: Entry point to the application. Runs training, evaluation, inference or benchmarking. * `preprocess.py`: Entry point to data preprocessing. * `download.py`: Downloads a given dataset from [Medical Segmentation Decathlon](http://medicaldecathlon.com/). * `Dockerfile`: Container with the basic set of dependencies to run nnU-Net. * `requirements.txt`: Set of extra requirements for running nnU-Net. * `evaluate.py`: Compares predictions with ground truth and computes the final score. The `data_preprocessing` folder contains information about the data preprocessing used by nnU-Net. Its contents are: * `configs.py`: Defines dataset configuration like patch size or spacing. * `preprocessor.py`: Implements the data preprocessing pipeline. The `data_loading` folder contains information about the data pipeline used by nnU-Net. Its contents are: * `data_module.py`: Defines `LightningDataModule` used by PyTorch Lightning. * `dali_loader.py`: Implements the DALI data loader. The `nnunet` folder contains information about the building blocks of nnU-Net and the way they are assembled. Its contents are: * `metrics.py`: Implements the dice metric. * `loss.py`: Implements the loss function. * `nn_unet.py`: Implements training/validation/test logic and dynamic creation of the U-Net architecture used by nnU-Net.
The `utils` folder includes: * `args.py`: Defines command line arguments. * `utils.py`: Defines utility functions. * `logger.py`: Defines logging callback for performance benchmarking. The `notebooks` folder includes: * `BraTS21.ipynb`: Notebook with our solution ranked 3 for the BraTS21 challenge. * `BraTS22.ipynb`: Notebook with our solution ranked 2 for the BraTS22 challenge. * `custom_dataset.ipynb`: Notebook which demonstrates how to use nnU-Net with the custom dataset. Other folders included in the root directory are: * `images/`: Contains a model diagram. * `scripts/`: Provides scripts for training, benchmarking, and inference of nnU-Net. ### Command-line options To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example: `python main.py --help` The following example output is printed when running the model: ``` usage: main.py [-h] [--exec_mode {train,evaluate,predict}] [--data DATA] [--results RESULTS] [--logname LOGNAME] [--task TASK] [--gpus GPUS] [--learning_rate LEARNING_RATE] [--gradient_clip_val GRADIENT_CLIP_VAL] [--negative_slope NEGATIVE_SLOPE] [--tta] [--brats] [--deep_supervision] [--more_chn] [--invert_resampled_y] [--amp] [--benchmark] [--focal] [--sync_batchnorm] [--save_ckpt] [--nfolds NFOLDS] [--seed SEED] [--skip_first_n_eval SKIP_FIRST_N_EVAL] [--ckpt_path CKPT_PATH] [--fold FOLD] [--patience PATIENCE] [--batch_size BATCH_SIZE] [--val_batch_size VAL_BATCH_SIZE] [--profile] [--momentum MOMENTUM] [--weight_decay WEIGHT_DECAY] [--save_preds] [--dim {2,3}] [--resume_training] [--num_workers NUM_WORKERS] [--epochs EPOCHS] [--warmup WARMUP] [--norm {instance,batch,group}] [--nvol NVOL] [--depth DEPTH] [--min_fmap MIN_FMAP] [--deep_supr_num DEEP_SUPR_NUM] [--res_block] [--filters FILTERS [FILTERS ...]] [--data2d_dim {2,3}] [--oversampling OVERSAMPLING] [--overlap OVERLAP] [--affinity {socket,single_single,single_single_unique,socket_unique_interleaved,socket_unique_continuous,disabled}] [--scheduler] [--optimizer {sgd,adam}] [--blend {gaussian,constant}] [--train_batches TRAIN_BATCHES] [--test_batches TEST_BATCHES] optional arguments: -h, --help show this help message and exit --exec_mode {train,evaluate,predict} Execution mode to run the model (default: train) --data DATA Path to data directory (default: /data) --results RESULTS Path to results directory (default: /results) --logname LOGNAME Name of dlloger output (default: None) --task TASK Task number. 
MSD uses numbers 01-10 (default: None) --gpus GPUS Number of gpus (default: 1) --learning_rate LEARNING_RATE Learning rate (default: 0.0008) --gradient_clip_val GRADIENT_CLIP_VAL Gradient clipping norm value (default: 0) --negative_slope NEGATIVE_SLOPE Negative slope for LeakyReLU (default: 0.01) --tta Enable test time augmentation (default: False) --brats Enable BraTS specific training and inference (default: False) --deep_supervision Enable deep supervision (default: False) --more_chn Create encoder with more channels (default: False) --invert_resampled_y Resize predictions to match label size before resampling (default: False) --amp Enable automatic mixed precision (default: False) --benchmark Run model benchmarking (default: False) --focal Use focal loss instead of cross entropy (default: False) --sync_batchnorm Enable synchronized batchnorm (default: False) --save_ckpt Enable saving checkpoint (default: False) --nfolds NFOLDS Number of cross-validation folds (default: 5) --seed SEED Random seed (default: 1) --skip_first_n_eval SKIP_FIRST_N_EVAL Skip the evaluation for the first n epochs. (default: 0) --ckpt_path CKPT_PATH Path to checkpoint (default: None) --fold FOLD Fold number (default: 0) --patience PATIENCE Early stopping patience (default: 100) --batch_size BATCH_SIZE Batch size (default: 2) --val_batch_size VAL_BATCH_SIZE Validation batch size (default: 4) --profile Run dlprof profiling (default: False) --momentum MOMENTUM Momentum factor (default: 0.99) --weight_decay WEIGHT_DECAY Weight decay (L2 penalty) (default: 0.0001) --save_preds Enable prediction saving (default: False) --dim {2,3} UNet dimension (default: 3) --resume_training Resume training from the last checkpoint (default: False) --num_workers NUM_WORKERS Number of subprocesses to use for data loading (default: 8) --epochs EPOCHS Number of training epochs (default: 1000) --warmup WARMUP Warmup iterations before collecting statistics (default: 5) --norm {instance,batch,group} Normalization layer (default: instance) --nvol NVOL Number of volumes which come into single batch size for 2D model (default: 4) --depth DEPTH The depth of the encoder (default: 5) --min_fmap MIN_FMAP Minimal dimension of feature map in the bottleneck (default: 4) --deep_supr_num DEEP_SUPR_NUM Number of deep supervision heads (default: 2) --res_block Enable residual blocks (default: False) --filters FILTERS [FILTERS ...] [Optional] Set U-Net filters (default: None) --data2d_dim {2,3} Input data dimension for 2d model (default: 3) --oversampling OVERSAMPLING Probability of crop to have some region with positive label (default: 0.4) --overlap OVERLAP Amount of overlap between scans during sliding window inference (default: 0.5) --affinity {socket,single_single,single_single_unique,socket_unique_interleaved,socket_unique_continuous,disabled} type of CPU affinity (default: socket_unique_contiguous) --scheduler Enable cosine rate scheduler with warmup (default: False) --optimizer {sgd,adam} Optimizer (default: adam) --blend {gaussian,constant} How to blend output of overlapping windows (default: gaussian) --train_batches TRAIN_BATCHES Limit number of batches for training (used for benchmarking mode only) (default: 0) --test_batches TEST_BATCHES Limit number of batches for inference (used for benchmarking mode only) (default: 0) ``` ### Getting the data The nnU-Net model was trained on the [Medical Segmentation Decathlon](http://medicaldecathlon.com/) datasets. All datasets are in Neuroimaging Informatics Technology Initiative (NIfTI) format. 
#### Dataset guidelines To train nnU-Net, you first need to preprocess your dataset with the `preprocess.py` script. Run `python scripts/preprocess.py --help` to see descriptions of the preprocess script arguments. For example, to preprocess data for 3D U-Net run: `python preprocess.py --task 01 --dim 3`. In `data_preprocessing/configs.py`, patch sizes, precomputed spacings, and statistics for CT datasets are defined for each [Medical Segmentation Decathlon](http://medicaldecathlon.com/) task. The preprocessing pipeline consists of the following steps: 1. Cropping to the region of non-zero values. 2. Resampling to the median voxel spacing of the respective dataset (with an exception for anisotropic datasets, where the lowest-resolution axis is selected to be the 10th percentile of the spacings). 3. Padding volumes so that their dimensions are at least as large as the patch size. 4. Normalizing: * For CT modalities the voxel values are clipped to the 0.5 and 99.5 percentiles of the foreground voxels and then the data is normalized with the mean and standard deviation collected from foreground voxels. * For MRI modalities z-score normalization is applied. #### Multi-dataset It is possible to run nnU-Net on a custom dataset. If your dataset corresponds to [Medical Segmentation Decathlon](http://medicaldecathlon.com/) (i.e., data is in `NIfTI` format and there is a `dataset.json` file where you provide the fields: modality, labels, and at least one of training, test) you need to perform the following: 1. Mount your dataset to the `/data` directory. 2. In `data_preprocessing/configs.py`: - Add your dataset directory name to the `task_dir` dictionary. For example, for the Brain Tumour dataset, it corresponds to `"01": "Task01_BrainTumour"`. - Add the patch size that you want to use for training to the `patch_size` dictionary. For example, for the Brain Tumour dataset it corresponds to `"01_3d": [128, 128, 128]` for 3D U-Net and `"01_2d": [192, 160]` for 2D U-Net. There are two types of suffixes, `_3d` and `_2d`; they correspond to 3D U-Net and 2D U-Net. 3. Preprocess your data with the `preprocess.py` script. For example, to preprocess the Brain Tumour dataset for 2D U-Net you should run `python preprocess.py --task 01 --dim 2`. If you have a dataset in another format, or you want to customize data preprocessing or data loading, see `notebooks/custom_dataset.ipynb`. ### Training process The model trains for at least `--min_epochs` and at most `--max_epochs` epochs. After each epoch, evaluation on the validation set is done and the validation loss is monitored for early stopping (see the `--patience` flag). Default training settings are: * Adam optimizer with a learning rate of 0.0008 and weight decay of 0.0001. * Training batch size is set to 2 for 3D U-Net and 16 for 2D U-Net. This default parametrization is applied when running scripts from the `scripts` directory and when running `main.py` without overriding these parameters. By default, the training is in full precision. To enable AMP, pass the `--amp` flag. AMP can be enabled for every mode of execution. The default configuration minimizes a function `L = (1 - dice_coefficient) + cross_entropy` during training and reports achieved convergence as [dice coefficient](https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient) per class. Training with a combination of dice and cross-entropy has been proven to achieve better convergence than training using only dice. The training can be run without using the predefined scripts.
The name of the training script is `main.py`. For example:
```
python main.py --exec_mode train --task 01 --fold 0 --gpus 1 --amp
```

Training artifacts will be saved to `/results` in the container. Some important artifacts are:
* `/results/logs.json`: Collected dice scores and loss values evaluated after each epoch on the validation set.
* `/results/checkpoints`: Saved checkpoints. By default, two checkpoints are saved - one after each epoch (`last.ckpt`) and one with the highest validation dice (e.g., `epoch=5.ckpt` if the highest dice was achieved in the 5th epoch).

To load a pretrained model, provide `--ckpt_path <path/to/checkpoint>`.

### Inference process

Inference can be launched by passing the `--exec_mode predict` flag. For example:
```
python main.py --exec_mode predict --task 01 --fold 0 --gpus 1 --amp --tta --save_preds --ckpt_path <path/to/checkpoint>
```

The script will then:
* Load the checkpoint specified by `<path/to/checkpoint>`
* Run inference on the preprocessed validation dataset corresponding to fold 0
* Print the achieved score to the console
* If `--save_preds` is provided, save the resulting masks in NumPy format in the `/results` directory

## Performance

### Benchmarking

The following section shows how to run benchmarks to measure the model performance in training and inference modes.

#### Training performance benchmark

To benchmark training, run the `scripts/benchmark.py` script with `--mode train`:
```
python scripts/benchmark.py --mode train --gpus <ngpus> --dim {2,3} --batch_size <bsize> [--amp] [--bind]
```

For example, to benchmark 3D U-Net training using mixed precision on 8 GPUs with a batch size of 2, run:
```
python scripts/benchmark.py --mode train --gpus 8 --dim 3 --batch_size 2 --amp
```

Each of these scripts will by default run 1 warm-up epoch and start performance benchmarking during the second epoch. At the end of the script, a line reporting the best train throughput and latency will be printed.

#### Inference performance benchmark

To benchmark inference, run the `scripts/benchmark.py` script with `--mode predict`:
```
python scripts/benchmark.py --mode predict --dim {2,3} --batch_size <bsize> [--amp]
```

For example, to benchmark inference using mixed precision for 3D U-Net with a batch size of 4, run:
```
python scripts/benchmark.py --mode predict --dim 3 --amp --batch_size 4
```

Each of these scripts will by default run a warm-up for 1 data pass and start inference benchmarking during the second pass. At the end of the script, a line reporting the inference throughput and latency will be printed.

*Note that this benchmark reports performance numbers for iterations over samples with fixed patch sizes. The real inference process uses sliding window over input images of arbitrary resolution, so performance may vary for images with different resolutions.*

### Results

The following sections provide details on how to achieve the same performance and accuracy in training and inference.

#### Training accuracy results

##### Training accuracy: NVIDIA DGX A100 (8x A100 80G)

Our results were obtained by running the `python scripts/train.py --gpus {1,8} --fold {0,1,2,3,4} --dim {2,3} [--amp] [--bind] --learning_rate lr --seed n` training scripts and averaging the results in the PyTorch 22.11 NGC container on NVIDIA DGX A100 with (8x A100 80G) GPUs.

Note: We recommend using the `--bind` flag for multi-GPU settings to increase the throughput.
To launch multi-GPU training with `--bind`, use the PyTorch distributed launcher, e.g., `python -m torch.distributed.launch --use_env --nproc_per_node=8 scripts/benchmark.py --mode train --gpus 8 --dim 3 --amp --batch_size 2 --bind` for an interactive session, or use the regular command when launching with SLURM's sbatch.

| Dimension | GPUs | Batch size / GPU | Dice - mixed precision | Dice - TF32 | Time to train - mixed precision | Time to train - TF32 | Time to train speedup (TF32 to mixed precision) |
|:-:|:-:|:--:|:-----:|:-----:|:-----:|:-----:|:----:|
| 2 | 1 | 2 | 73.21 | 73.11 | 33 min | 48 min | 1.46 |
| 2 | 8 | 2 | 73.15 | 73.16 | 9 min | 13 min | 1.44 |
| 3 | 1 | 2 | 74.35 | 74.34 | 104 min | 167 min | 1.61 |
| 3 | 8 | 2 | 74.30 | 74.32 | 23 min | 36 min | 1.57 |

The reported dice score is the average over 5 folds from the best run of a grid search over learning rates {1e-4, 2e-4, ..., 9e-4} and seeds {1, 3, 5}.

##### Training accuracy: NVIDIA DGX-1 (8x V100 32G)

Our results were obtained by running the `python scripts/train.py --gpus {1,8} --fold {0,1,2,3,4} --dim {2,3} [--amp] [--bind] --seed n` training scripts and averaging the results in the PyTorch 22.11 NGC container on NVIDIA DGX-1 with (8x V100 32G) GPUs.

Note: We recommend using the `--bind` flag for multi-GPU settings to increase the throughput. To launch multi-GPU training with `--bind`, use the PyTorch distributed launcher, e.g., `python -m torch.distributed.launch --use_env --nproc_per_node=8 scripts/benchmark.py --mode train --gpus 8 --dim 3 --amp --batch_size 2 --bind` for an interactive session, or use the regular command when launching with SLURM's sbatch.

| Dimension | GPUs | Batch size / GPU | Dice - mixed precision | Dice - FP32 | Time to train - mixed precision | Time to train - FP32 | Time to train speedup (FP32 to mixed precision) |
|:-:|:-:|:--:|:-----:|:-----:|:-----:|:-----:|:----:|
| 2 | 1 | 2 | 73.18 | 73.22 | 60 min | 114 min | 1.90 |
| 2 | 8 | 2 | 73.15 | 73.18 | 13 min | 19 min | 1.46 |
| 3 | 1 | 2 | 74.31 | 74.33 | 201 min | 680 min | 3.38 |
| 3 | 8 | 2 | 74.35 | 74.39 | 41 min | 153 min | 3.73 |

The reported dice score is the average over 5 folds from the best run of a grid search over learning rates {1e-4, 2e-4, ..., 9e-4} and seeds {1, 3, 5}.

#### Training performance results

##### Training performance: NVIDIA DGX A100 (8x A100 80G)

Our results were obtained by running the `python scripts/benchmark.py --mode train --gpus {1,8} --dim {2,3} --batch_size <bsize> [--amp]` training script in the NGC container on NVIDIA DGX A100 (8x A100 80G) GPUs. Performance numbers (in volumes per second) were averaged over an entire training epoch.

Note: We recommend using the `--bind` flag for multi-GPU settings to increase the throughput. To launch multi-GPU training with `--bind`, use `python -m torch.distributed.launch --use_env --nproc_per_node=<ngpus> scripts/train.py --bind ...` for an interactive session, or use the regular command when launching with SLURM's sbatch.
| Dimension | GPUs | Batch size / GPU | Throughput - mixed precision [img/s] | Throughput - TF32 [img/s] | Throughput speedup (TF32 - mixed precision) | Weak scaling - mixed precision | Weak scaling - TF32 |
|:-:|:-:|:--:|:------:|:------:|:-----:|:-----:|:-----:|
| 2 | 1 | 32 | 1040.58 | 732.22 | 1.42 | - | - |
| 2 | 1 | 64 | 1238.68 | 797.37 | 1.55 | - | - |
| 2 | 1 | 128 | 1345.29 | 838.38 | 1.60 | - | - |
| 2 | 8 | 32 | 7747.27 | 5588.2 | 1.39 | 7.45 | 7.60 |
| 2 | 8 | 64 | 9417.27 | 6246.95 | 1.51 | 7.60 | 8.04 |
| 2 | 8 | 128 | 10694.1 | 6631.08 | 1.61 | 7.95 | 7.83 |
| 3 | 1 | 1 | 24.61 | 9.66 | 2.55 | - | - |
| 3 | 1 | 2 | 27.48 | 11.27 | 2.44 | - | - |
| 3 | 1 | 4 | 29.96 | 12.22 | 2.45 | - | - |
| 3 | 8 | 1 | 187.07 | 76.44 | 2.45 | 7.63 | 7.91 |
| 3 | 8 | 2 | 220.83 | 88.67 | 2.49 | 7.83 | 7.87 |
| 3 | 8 | 4 | 234.5 | 96.61 | 2.43 | 7.91 | 7.91 |

To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).

##### Training performance: NVIDIA DGX-1 (8x V100 32G)

Our results were obtained by running the `python scripts/benchmark.py --mode train --gpus {1,8} --dim {2,3} --batch_size <bsize> [--amp] [--bind]` training script in the PyTorch 22.11 NGC container on NVIDIA DGX-1 with (8x V100 32G) GPUs. Performance numbers (in volumes per second) were averaged over an entire training epoch.

Note: We recommend using the `--bind` flag for multi-GPU settings to increase the throughput. To launch multi-GPU training with `--bind`, use `python -m torch.distributed.launch --use_env --nproc_per_node=<ngpus> scripts/train.py --bind ...` for an interactive session, or use the regular command when launching with SLURM's sbatch.

| Dimension | GPUs | Batch size / GPU | Throughput - mixed precision [img/s] | Throughput - FP32 [img/s] | Throughput speedup (FP32 - mixed precision) | Weak scaling - mixed precision | Weak scaling - FP32 |
|:-:|:-:|:---:|:---------:|:-----------:|:--------:|:---------:|:-------------:|
| 2 | 1 | 32 | 561.6 | 310.21 | 1.81 | - | - |
| 2 | 1 | 64 | 657.91 | 326.02 | 2.02 | - | - |
| 2 | 1 | 128 | 706.92 | 332.81 | 2.12 | - | - |
| 2 | 8 | 32 | 3903.88 | 2396.88 | 1.63 | 6.95 | 7.73 |
| 2 | 8 | 64 | 4922.76 | 2590.66 | 1.90 | 7.48 | 7.95 |
| 2 | 8 | 128 | 5597.87 | 2667.56 | 2.10 | 7.92 | 8.02 |
| 3 | 1 | 1 | 11.38 | 2.07 | 5.50 | - | - |
| 3 | 1 | 2 | 12.34 | 2.51 | 4.92 | - | - |
| 3 | 8 | 1 | 84.38 | 16.55 | 5.10 | 7.41 | 8.00 |
| 3 | 8 | 2 | 98.17 | 20.15 | 4.87 | 7.96 | 8.03 |

To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).

#### Inference performance results

##### Inference performance: NVIDIA DGX A100 (1x A100 80G)

Our results were obtained by running the `python scripts/benchmark.py --mode predict --dim {2,3} --batch_size <bsize> [--amp]` inference benchmarking script in the PyTorch 22.11 NGC container on NVIDIA DGX A100 (1x A100 80G) GPU.
FP16

| Dimension | Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:---------:|:-------------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 2 | 32 | 192x160 | 1818.05 | 17.6 | 19.86 | 20.38 | 20.98 |
| 2 | 64 | 192x160 | 3645.16 | 17.56 | 19.86 | 20.82 | 23.66 |
| 2 | 128 | 192x160 | 3850.35 | 33.24 | 34.72 | 61.4 | 63.58 |
| 3 | 1 | 128x128x128 | 68.45 | 14.61 | 17.02 | 17.41 | 19.27 |
| 3 | 2 | 128x128x128 | 56.9 | 35.15 | 40.9 | 43.15 | 57.94 |
| 3 | 4 | 128x128x128 | 76.39 | 52.36 | 57.9 | 59.52 | 70.24 |

TF32

| Dimension | Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:---------:|:-------------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 2 | 32 | 192x160 | 1868.56 | 17.13 | 51.75 | 53.07 | 54.92 |
| 2 | 64 | 192x160 | 2508.57 | 25.51 | 56.83 | 90.08 | 96.87 |
| 2 | 128 | 192x160 | 2609.6 | 49.05 | 191.48 | 201.8 | 205.29 |
| 3 | 1 | 128x128x128 | 35.02 | 28.55 | 51.75 | 53.07 | 54.92 |
| 3 | 2 | 128x128x128 | 39.88 | 50.15 | 56.83 | 90.08 | 96.87 |
| 3 | 4 | 128x128x128 | 41.32 | 96.8 | 191.48 | 201.8 | 205.29 |

Throughput is reported in images per second. Latency is reported in milliseconds per batch. To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).

##### Inference performance: NVIDIA DGX-1 (1x V100 32G)

Our results were obtained by running the `python scripts/benchmark.py --mode predict --dim {2,3} --batch_size <bsize> [--amp]` inference benchmarking script in the PyTorch 22.11 NGC container on NVIDIA DGX-1 with (1x V100 32G) GPU.

FP16

| Dimension | Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:---------:|:-------------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 2 | 32 | 192x160 | 1254.38 | 25.51 | 29.07 | 30.07 | 31.23 |
| 2 | 64 | 192x160 | 2024.13 | 31.62 | 71.51 | 71.78 | 72.44 |
| 2 | 128 | 192x160 | 2136.95 | 59.9 | 61.23 | 61.63 | 110.13 |
| 3 | 1 | 128x128x128 | 36.93 | 27.08 | 28.6 | 31.43 | 48.3 |
| 3 | 2 | 128x128x128 | 38.86 | 51.47 | 53.3 | 54.77 | 92.49 |
| 3 | 4 | 128x128x128 | 39.15 | 102.18 | 104.62 | 112.17 | 180.47 |

FP32

| Dimension | Batch size | Resolution | Throughput Avg [img/s] | Latency Avg [ms] | Latency 90% [ms] | Latency 95% [ms] | Latency 99% [ms] |
|:----------:|:---------:|:-------------:|:----------------------:|:----------------:|:----------------:|:----------------:|:----------------:|
| 2 | 32 | 192x160 | 1019.97 | 31.37 | 32.93 | 55.58 | 69.14 |
| 2 | 64 | 192x160 | 1063.59 | 60.17 | 62.32 | 63.11 | 111.01 |
| 2 | 128 | 192x160 | 1069.81 | 119.65 | 123.48 | 123.83 | 225.46 |
| 3 | 1 | 128x128x128 | 9.92 | 100.78 | 103.2 | 103.62 | 111.97 |
| 3 | 2 | 128x128x128 | 10.14 | 197.33 | 201.05 | 201.4 | 201.79 |
| 3 | 4 | 128x128x128 | 10.25 | 390.33 | 398.21 | 399.34 | 401.05 |

Throughput is reported in images per second. Latency is reported in milliseconds per batch. To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
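For reference, the statistics reported above (average throughput and latency percentiles) can be derived from per-batch timings as in the following minimal sketch. This is an illustration only, not the repository's benchmark code; the function name and its interface are assumptions.

```python
import numpy as np

def summarize_benchmark(batch_times_s, batch_size):
    """batch_times_s: per-batch wall-clock times in seconds, warm-up iterations excluded."""
    times = np.asarray(batch_times_s, dtype=np.float64)
    latency_ms = times * 1000.0  # milliseconds per batch
    return {
        "throughput_avg [img/s]": batch_size / times.mean(),
        "latency_avg [ms]": latency_ms.mean(),
        "latency_90% [ms]": np.percentile(latency_ms, 90),
        "latency_95% [ms]": np.percentile(latency_ms, 95),
        "latency_99% [ms]": np.percentile(latency_ms, 99),
    }
```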
## Release notes

### Changelog

November 2022
- Container updated to 22.11
- Add support for 3D channel last convolutions
- Add support for nvFuser Instance Normalization
- Add support for GPU binding

October 2022
- Add Jupyter Notebook with BraTS'22 solution (ranked 2)

December 2021
- Container updated to 21.11
- Use MONAI DynUNet instead of custom U-Net implementation
- Add balanced multi-GPU evaluation
- Support for evaluation with volumes resampled to their original shape

October 2021
- Add Jupyter Notebook with BraTS'21 solution (ranked 3)

May 2021
- Add Triton Inference Server support
- Removed deep supervision, attention, and drop block

March 2021
- Container updated to 21.02
- Change data format from tfrecord to npy and data loading for 2D

January 2021
- Initial release
- Add notebook with custom dataset loading

### Known issues

There are no known issues in this release.
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules
modules
gelu
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ See "Gaussian Error Linear Units (GELUs)" by Dan Hendrycks and Kevin Gimpel with the corresponding GitHub repo: https://github.com/hendrycks/GELUs """ import math import torch import torch.nn as nn def gelu_accurate(x): if not hasattr(gelu_accurate, "_a"): gelu_accurate._a = math.sqrt(2 / math.pi) return ( 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) ) def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x)
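As a quick, purely illustrative check (not part of the module above), the tanh-based `gelu_accurate` should closely track the exact GELU computed by `gelu`, assuming both functions above are in scope:

```python
import torch

x = torch.randn(4, 8)
exact = gelu(x)              # exact GELU, computed in fp32 and cast back to the input dtype
approx = gelu_accurate(x)    # tanh approximation from Hendrycks & Gimpel
print(torch.max(torch.abs(exact - approx)))  # expected to be small, well below 1e-2
```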
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ProjectionPlugin
taco2ProjectionPlugin
taco2ProjectionLayerPluginCreator
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_PROJECTIONLAYERPLUGINCREATOR_H #define TT2I_PROJECTIONLAYERPLUGINCREATOR_H #include "NvInfer.h" #include <string> #ifdef DEVEL // The destructor of nvinfer1::IPluginCreator is non-virtual and public, so // we need to supress the warning. #pragma GCC diagnostic ignored "-Wnon-virtual-dtor" #endif namespace nvinfer1 { namespace plugin { class Taco2ProjectionLayerPluginCreator : public nvinfer1::IPluginCreator { public: /** * @brief Get the collection of fields for this plugin, with their names only. * * @return The collection of fields. */ static nvinfer1::PluginFieldCollection* getFields(); /** * @brief Create a new Taco2ProjectionLayerPluginCreator. */ Taco2ProjectionLayerPluginCreator(); /** * @brief Get the name of the plugin. * * @return The name of the plugin. */ const char* getPluginName() const override; /** * @brief Get the plugin version. * * @return The plugin version. */ const char* getPluginVersion() const override; /** * @brief Get the collection of fields for this plugin. * * @return The collection of fields. */ const nvinfer1::PluginFieldCollection* getFieldNames() override; /** * @brief Create a new Taco2ProjectionLayerPlugin. * * @param name The name (unused currently). * @param fc The collection of fields to initialize with. * * @return The created plugin. */ nvinfer1::IPluginV2* createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) override; /** * @brief Create a custom layer by name from a data stream. * * @param layerName The name of the layer. * @param serialData The serialized data for the layer. * @param serialLength The length of the serialized data. * * @return The plugin. Clients must destroy the plugin once all consumers of * it have been destroyed. */ nvinfer1::IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override; /** * @brief Set the namespace for created plugins. * * @param pluginNamespace The namespace. 
*/ void setPluginNamespace(const char* pluginNamespace) override; /** * @brief Get the namespace for created plugins. * * @return The namespace. */ const char* getPluginNamespace() const override; private: std::string mNamespace; }; } // namespace plugin } // namespace nvinfer1 #ifdef DEVEL #pragma GCC diagnostic pop #endif #endif
TensorFlow2/Recommendation/DLRM_and_DCNv2/dataloading
dataloading
generate_feature_spec
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # author: Tomasz Grel (tgrel@nvidia.com) import yaml import argparse variants = dict( # Generates 16 GiB embedding tables criteo_t15_synthetic=dict( num_numerical=13, cardinalities=[7912889, 33823, 17139, 7339, 20046, 4, 7105, 1382, 63, 5554114, 582469, 245828, 11, 2209, 10667, 104, 4, 968, 15, 8165896, 2675940, 7156453, 302516, 12022, 97, 35], hotness=26 * [1], alpha=26 * [1.45] ), # Generates 85 GiB embedding tables criteo_t3_synthetic=dict( num_numerical=13, cardinalities=[45833188,36747,1572176,345139,11,2209,11268,128,4,975,15,48937457,17246,11316796,40094537, 452104,12607,105,36,7414,20244,4,7115,1442,63,29275261], hotness=26 * [1], alpha=26 * [1.45] ), # Generates 421 GiB criteo_t0_synthetic=dict( num_numerical=13, cardinalities=[227605432, 39061, 3067956, 405283, 11, 2209, 11939, 155, 4, 977, 15, 292775614, 17296, 40790948, 187188510, 590152, 12974, 109, 37, 7425, 20266, 4, 7123, 1544, 64, 130229467], hotness=26 * [1], alpha=26 * [1.45] ), ) def main(): parser = argparse.ArgumentParser(description="Generate a synthetic feature spec") parser.add_argument('--dst', default='feature_spec.yaml', type=str, help='Output path') parser.add_argument('--variant', choices=list(variants.keys()), required=True, type=str, help='Variant of the synthetic dataset to be used') args = parser.parse_args() num_numerical, cardinalities, hotness, alphas = tuple(variants[args.variant].values()) feature_spec = {} for i, (c, h, a) in enumerate(zip(cardinalities, hotness, alphas)): name = f'cat_{i}' f = dict(cardinality=c, hotness=h, alpha=a, dtype='int32') feature_spec[name] = f for i in range(num_numerical): name = f'num_{i}' feature_spec[name] = dict(dtype='float16') feature_spec['label'] = dict(dtype='int8') channel_spec = {} channel_spec['categorical'] = [k for k in feature_spec.keys() if 'cat' in k] channel_spec['numerical'] = [k for k in feature_spec.keys() if 'num' in k] channel_spec['label'] = ['label'] source_spec = None full_spec = dict(feature_spec=feature_spec, channel_spec=channel_spec, source_spec=source_spec) with open(args.dst, 'w') as f: yaml.dump(data=full_spec, stream=f) if __name__ == '__main__': main()
TensorFlow/Segmentation/UNet_Medical/examples
examples
unet_INFER_BENCHMARK_TF-AMP
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script launches U-Net run in FP16 on 1 GPU for inference benchmarking. Usage: # bash unet_INFER_BENCHMARK_TF-AMP.sh <path to dataset> <path to results directory> <batch size> horovodrun -np 1 python main.py --data_dir $1 --model_dir $2 --batch_size $3 --exec_mode predict --benchmark --warmup_steps 200 --max_steps 600 --xla --amp
PyTorch/Detection/Efficientdet/utils
utils
checkpoint_processing
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import torch from collections import OrderedDict parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation') parser.add_argument('--checkpoint_path', default='/checkpoints/model_best.pth.tar', help='path to checkpoint') parser.add_argument('--state_dict_path', default='/checkpoints/Effdet_B0.pth', help='path to save processed checkpoint state_dict to.') args = parser.parse_args() ckpt = torch.load(args.checkpoint_path) print("Checkpoint keys {}".format([k for k in ckpt.keys()])) if 'state_dict_ema' in ckpt: print("... state_dict found in ckpt") state_dict = ckpt['state_dict_ema'] new_state_dict = OrderedDict() for k, v in state_dict.items(): # strip `module.` prefix if k.startswith('module'): name = k[7:] elif k.startswith('model'): name = k[6:] else: name = k new_state_dict[name] = v print("... state_dict saving") torch.save(new_state_dict, args.state_dict_path) print("...End process")
PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/unidecoder
unidecoder
replacements
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # MIT License # # Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (https://sindresorhus.com) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # Based on: # https://github.com/sindresorhus/transliterate/blob/main/replacements.js # replacements = [ # German umlauts ['ß', 'ss'], ['ẞ', 'Ss'], ['ä', 'ae'], ['Ä', 'Ae'], ['ö', 'oe'], ['Ö', 'Oe'], ['ü', 'ue'], ['Ü', 'Ue'], # Latin ['À', 'A'], ['Á', 'A'], ['Â', 'A'], ['Ã', 'A'], ['Ä', 'Ae'], ['Å', 'A'], ['Æ', 'AE'], ['Ç', 'C'], ['È', 'E'], ['É', 'E'], ['Ê', 'E'], ['Ë', 'E'], ['Ì', 'I'], ['Í', 'I'], ['Î', 'I'], ['Ï', 'I'], ['Ð', 'D'], ['Ñ', 'N'], ['Ò', 'O'], ['Ó', 'O'], ['Ô', 'O'], ['Õ', 'O'], ['Ö', 'Oe'], ['Ő', 'O'], ['Ø', 'O'], ['Ù', 'U'], ['Ú', 'U'], ['Û', 'U'], ['Ü', 'Ue'], ['Ű', 'U'], ['Ý', 'Y'], ['Þ', 'TH'], ['ß', 'ss'], ['à', 'a'], ['á', 'a'], ['â', 'a'], ['ã', 'a'], ['ä', 'ae'], ['å', 'a'], ['æ', 'ae'], ['ç', 'c'], ['è', 'e'], ['é', 'e'], ['ê', 'e'], ['ë', 'e'], ['ì', 'i'], ['í', 'i'], ['î', 'i'], ['ï', 'i'], ['ð', 'd'], ['ñ', 'n'], ['ò', 'o'], ['ó', 'o'], ['ô', 'o'], ['õ', 'o'], ['ö', 'oe'], ['ő', 'o'], ['ø', 'o'], ['ù', 'u'], ['ú', 'u'], ['û', 'u'], ['ü', 'ue'], ['ű', 'u'], ['ý', 'y'], ['þ', 'th'], ['ÿ', 'y'], ['ẞ', 'SS'], # Vietnamese ['à', 'a'], ['À', 'A'], ['á', 'a'], ['Á', 'A'], ['â', 'a'], ['Â', 'A'], ['ã', 'a'], ['Ã', 'A'], ['è', 'e'], ['È', 'E'], ['é', 'e'], ['É', 'E'], ['ê', 'e'], ['Ê', 'E'], ['ì', 'i'], ['Ì', 'I'], ['í', 'i'], ['Í', 'I'], ['ò', 'o'], ['Ò', 'O'], ['ó', 'o'], ['Ó', 'O'], ['ô', 'o'], ['Ô', 'O'], ['õ', 'o'], ['Õ', 'O'], ['ù', 'u'], ['Ù', 'U'], ['ú', 'u'], ['Ú', 'U'], ['ý', 'y'], ['Ý', 'Y'], ['ă', 'a'], ['Ă', 'A'], ['Đ', 'D'], ['đ', 'd'], ['ĩ', 'i'], ['Ĩ', 'I'], ['ũ', 'u'], ['Ũ', 'U'], ['ơ', 'o'], ['Ơ', 'O'], ['ư', 'u'], ['Ư', 'U'], ['ạ', 'a'], ['Ạ', 'A'], ['ả', 'a'], ['Ả', 'A'], ['ấ', 'a'], ['Ấ', 'A'], ['ầ', 'a'], ['Ầ', 'A'], ['ẩ', 'a'], ['Ẩ', 'A'], ['ẫ', 'a'], ['Ẫ', 'A'], ['ậ', 'a'], ['Ậ', 'A'], ['ắ', 'a'], ['Ắ', 'A'], ['ằ', 
'a'], ['Ằ', 'A'], ['ẳ', 'a'], ['Ẳ', 'A'], ['ẵ', 'a'], ['Ẵ', 'A'], ['ặ', 'a'], ['Ặ', 'A'], ['ẹ', 'e'], ['Ẹ', 'E'], ['ẻ', 'e'], ['Ẻ', 'E'], ['ẽ', 'e'], ['Ẽ', 'E'], ['ế', 'e'], ['Ế', 'E'], ['ề', 'e'], ['Ề', 'E'], ['ể', 'e'], ['Ể', 'E'], ['ễ', 'e'], ['Ễ', 'E'], ['ệ', 'e'], ['Ệ', 'E'], ['ỉ', 'i'], ['Ỉ', 'I'], ['ị', 'i'], ['Ị', 'I'], ['ọ', 'o'], ['Ọ', 'O'], ['ỏ', 'o'], ['Ỏ', 'O'], ['ố', 'o'], ['Ố', 'O'], ['ồ', 'o'], ['Ồ', 'O'], ['ổ', 'o'], ['Ổ', 'O'], ['ỗ', 'o'], ['Ỗ', 'O'], ['ộ', 'o'], ['Ộ', 'O'], ['ớ', 'o'], ['Ớ', 'O'], ['ờ', 'o'], ['Ờ', 'O'], ['ở', 'o'], ['Ở', 'O'], ['ỡ', 'o'], ['Ỡ', 'O'], ['ợ', 'o'], ['Ợ', 'O'], ['ụ', 'u'], ['Ụ', 'U'], ['ủ', 'u'], ['Ủ', 'U'], ['ứ', 'u'], ['Ứ', 'U'], ['ừ', 'u'], ['Ừ', 'U'], ['ử', 'u'], ['Ử', 'U'], ['ữ', 'u'], ['Ữ', 'U'], ['ự', 'u'], ['Ự', 'U'], ['ỳ', 'y'], ['Ỳ', 'Y'], ['ỵ', 'y'], ['Ỵ', 'Y'], ['ỷ', 'y'], ['Ỷ', 'Y'], ['ỹ', 'y'], ['Ỹ', 'Y'], # Arabic ['ء', 'e'], ['آ', 'a'], ['أ', 'a'], ['ؤ', 'w'], ['إ', 'i'], ['ئ', 'y'], ['ا', 'a'], ['ب', 'b'], ['ة', 't'], ['ت', 't'], ['ث', 'th'], ['ج', 'j'], ['ح', 'h'], ['خ', 'kh'], ['د', 'd'], ['ذ', 'dh'], ['ر', 'r'], ['ز', 'z'], ['س', 's'], ['ش', 'sh'], ['ص', 's'], ['ض', 'd'], ['ط', 't'], ['ظ', 'z'], ['ع', 'e'], ['غ', 'gh'], ['ـ', '_'], ['ف', 'f'], ['ق', 'q'], ['ك', 'k'], ['ل', 'l'], ['م', 'm'], ['ن', 'n'], ['ه', 'h'], ['و', 'w'], ['ى', 'a'], ['ي', 'y'], ['َ‎', 'a'], ['ُ', 'u'], ['ِ‎', 'i'], ['٠', '0'], ['١', '1'], ['٢', '2'], ['٣', '3'], ['٤', '4'], ['٥', '5'], ['٦', '6'], ['٧', '7'], ['٨', '8'], ['٩', '9'], # Persian / Farsi ['چ', 'ch'], ['ک', 'k'], ['گ', 'g'], ['پ', 'p'], ['ژ', 'zh'], ['ی', 'y'], ['۰', '0'], ['۱', '1'], ['۲', '2'], ['۳', '3'], ['۴', '4'], ['۵', '5'], ['۶', '6'], ['۷', '7'], ['۸', '8'], ['۹', '9'], # Pashto ['ټ', 'p'], ['ځ', 'z'], ['څ', 'c'], ['ډ', 'd'], ['ﺫ', 'd'], ['ﺭ', 'r'], ['ړ', 'r'], ['ﺯ', 'z'], ['ږ', 'g'], ['ښ', 'x'], ['ګ', 'g'], ['ڼ', 'n'], ['ۀ', 'e'], ['ې', 'e'], ['ۍ', 'ai'], # Urdu ['ٹ', 't'], ['ڈ', 'd'], ['ڑ', 'r'], ['ں', 'n'], ['ہ', 'h'], ['ھ', 'h'], ['ے', 'e'], # Russian ['А', 'A'], ['а', 'a'], ['Б', 'B'], ['б', 'b'], ['В', 'V'], ['в', 'v'], ['Г', 'G'], ['г', 'g'], ['Д', 'D'], ['д', 'd'], ['ъе', 'ye'], ['Ъе', 'Ye'], ['ъЕ', 'yE'], ['ЪЕ', 'YE'], ['Е', 'E'], ['е', 'e'], ['Ё', 'Yo'], ['ё', 'yo'], ['Ж', 'Zh'], ['ж', 'zh'], ['З', 'Z'], ['з', 'z'], ['И', 'I'], ['и', 'i'], ['ый', 'iy'], ['Ый', 'Iy'], ['ЫЙ', 'IY'], ['ыЙ', 'iY'], ['Й', 'Y'], ['й', 'y'], ['К', 'K'], ['к', 'k'], ['Л', 'L'], ['л', 'l'], ['М', 'M'], ['м', 'm'], ['Н', 'N'], ['н', 'n'], ['О', 'O'], ['о', 'o'], ['П', 'P'], ['п', 'p'], ['Р', 'R'], ['р', 'r'], ['С', 'S'], ['с', 's'], ['Т', 'T'], ['т', 't'], ['У', 'U'], ['у', 'u'], ['Ф', 'F'], ['ф', 'f'], ['Х', 'Kh'], ['х', 'kh'], ['Ц', 'Ts'], ['ц', 'ts'], ['Ч', 'Ch'], ['ч', 'ch'], ['Ш', 'Sh'], ['ш', 'sh'], ['Щ', 'Sch'], ['щ', 'sch'], ['Ъ', ''], ['ъ', ''], ['Ы', 'Y'], ['ы', 'y'], ['Ь', ''], ['ь', ''], ['Э', 'E'], ['э', 'e'], ['Ю', 'Yu'], ['ю', 'yu'], ['Я', 'Ya'], ['я', 'ya'], # Romanian ['ă', 'a'], ['Ă', 'A'], ['ș', 's'], ['Ș', 'S'], ['ț', 't'], ['Ț', 'T'], ['ţ', 't'], ['Ţ', 'T'], # Turkish ['ş', 's'], ['Ş', 'S'], ['ç', 'c'], ['Ç', 'C'], ['ğ', 'g'], ['Ğ', 'G'], ['ı', 'i'], ['İ', 'I'], # Armenian ['ա', 'a'], ['Ա', 'A'], ['բ', 'b'], ['Բ', 'B'], ['գ', 'g'], ['Գ', 'G'], ['դ', 'd'], ['Դ', 'D'], ['ե', 'ye'], ['Ե', 'Ye'], ['զ', 'z'], ['Զ', 'Z'], ['է', 'e'], ['Է', 'E'], ['ը', 'y'], ['Ը', 'Y'], ['թ', 't'], ['Թ', 'T'], ['ժ', 'zh'], ['Ժ', 'Zh'], ['ի', 'i'], ['Ի', 'I'], ['լ', 'l'], ['Լ', 'L'], ['խ', 'kh'], ['Խ', 'Kh'], ['ծ', 'ts'], ['Ծ', 'Ts'], ['կ', 'k'], ['Կ', 'K'], ['հ', 'h'], ['Հ', 'H'], ['ձ', 'dz'], 
['Ձ', 'Dz'], ['ղ', 'gh'], ['Ղ', 'Gh'], ['ճ', 'tch'], ['Ճ', 'Tch'], ['մ', 'm'], ['Մ', 'M'], ['յ', 'y'], ['Յ', 'Y'], ['ն', 'n'], ['Ն', 'N'], ['շ', 'sh'], ['Շ', 'Sh'], ['ո', 'vo'], ['Ո', 'Vo'], ['չ', 'ch'], ['Չ', 'Ch'], ['պ', 'p'], ['Պ', 'P'], ['ջ', 'j'], ['Ջ', 'J'], ['ռ', 'r'], ['Ռ', 'R'], ['ս', 's'], ['Ս', 'S'], ['վ', 'v'], ['Վ', 'V'], ['տ', 't'], ['Տ', 'T'], ['ր', 'r'], ['Ր', 'R'], ['ց', 'c'], ['Ց', 'C'], ['ու', 'u'], ['ՈՒ', 'U'], ['Ու', 'U'], ['փ', 'p'], ['Փ', 'P'], ['ք', 'q'], ['Ք', 'Q'], ['օ', 'o'], ['Օ', 'O'], ['ֆ', 'f'], ['Ֆ', 'F'], ['և', 'yev'], # Georgian ['ა', 'a'], ['ბ', 'b'], ['გ', 'g'], ['დ', 'd'], ['ე', 'e'], ['ვ', 'v'], ['ზ', 'z'], ['თ', 't'], ['ი', 'i'], ['კ', 'k'], ['ლ', 'l'], ['მ', 'm'], ['ნ', 'n'], ['ო', 'o'], ['პ', 'p'], ['ჟ', 'zh'], ['რ', 'r'], ['ს', 's'], ['ტ', 't'], ['უ', 'u'], ['ფ', 'ph'], ['ქ', 'q'], ['ღ', 'gh'], ['ყ', 'k'], ['შ', 'sh'], ['ჩ', 'ch'], ['ც', 'ts'], ['ძ', 'dz'], ['წ', 'ts'], ['ჭ', 'tch'], ['ხ', 'kh'], ['ჯ', 'j'], ['ჰ', 'h'], # Czech ['č', 'c'], ['ď', 'd'], ['ě', 'e'], ['ň', 'n'], ['ř', 'r'], ['š', 's'], ['ť', 't'], ['ů', 'u'], ['ž', 'z'], ['Č', 'C'], ['Ď', 'D'], ['Ě', 'E'], ['Ň', 'N'], ['Ř', 'R'], ['Š', 'S'], ['Ť', 'T'], ['Ů', 'U'], ['Ž', 'Z'], # Dhivehi ['ހ', 'h'], ['ށ', 'sh'], ['ނ', 'n'], ['ރ', 'r'], ['ބ', 'b'], ['ޅ', 'lh'], ['ކ', 'k'], ['އ', 'a'], ['ވ', 'v'], ['މ', 'm'], ['ފ', 'f'], ['ދ', 'dh'], ['ތ', 'th'], ['ލ', 'l'], ['ގ', 'g'], ['ޏ', 'gn'], ['ސ', 's'], ['ޑ', 'd'], ['ޒ', 'z'], ['ޓ', 't'], ['ޔ', 'y'], ['ޕ', 'p'], ['ޖ', 'j'], ['ޗ', 'ch'], ['ޘ', 'tt'], ['ޙ', 'hh'], ['ޚ', 'kh'], ['ޛ', 'th'], ['ޜ', 'z'], ['ޝ', 'sh'], ['ޞ', 's'], ['ޟ', 'd'], ['ޠ', 't'], ['ޡ', 'z'], ['ޢ', 'a'], ['ޣ', 'gh'], ['ޤ', 'q'], ['ޥ', 'w'], ['ަ', 'a'], ['ާ', 'aa'], ['ި', 'i'], ['ީ', 'ee'], ['ު', 'u'], ['ޫ', 'oo'], ['ެ', 'e'], ['ޭ', 'ey'], ['ޮ', 'o'], ['ޯ', 'oa'], ['ް', ''], # Greek ['α', 'a'], ['β', 'v'], ['γ', 'g'], ['δ', 'd'], ['ε', 'e'], ['ζ', 'z'], ['η', 'i'], ['θ', 'th'], ['ι', 'i'], ['κ', 'k'], ['λ', 'l'], ['μ', 'm'], ['ν', 'n'], ['ξ', 'ks'], ['ο', 'o'], ['π', 'p'], ['ρ', 'r'], ['σ', 's'], ['τ', 't'], ['υ', 'y'], ['φ', 'f'], ['χ', 'x'], ['ψ', 'ps'], ['ω', 'o'], ['ά', 'a'], ['έ', 'e'], ['ί', 'i'], ['ό', 'o'], ['ύ', 'y'], ['ή', 'i'], ['ώ', 'o'], ['ς', 's'], ['ϊ', 'i'], ['ΰ', 'y'], ['ϋ', 'y'], ['ΐ', 'i'], ['Α', 'A'], ['Β', 'B'], ['Γ', 'G'], ['Δ', 'D'], ['Ε', 'E'], ['Ζ', 'Z'], ['Η', 'I'], ['Θ', 'TH'], ['Ι', 'I'], ['Κ', 'K'], ['Λ', 'L'], ['Μ', 'M'], ['Ν', 'N'], ['Ξ', 'KS'], ['Ο', 'O'], ['Π', 'P'], ['Ρ', 'R'], ['Σ', 'S'], ['Τ', 'T'], ['Υ', 'Y'], ['Φ', 'F'], ['Χ', 'X'], ['Ψ', 'PS'], ['Ω', 'O'], ['Ά', 'A'], ['Έ', 'E'], ['Ί', 'I'], ['Ό', 'O'], ['Ύ', 'Y'], ['Ή', 'I'], ['Ώ', 'O'], ['Ϊ', 'I'], ['Ϋ', 'Y'], # Disabled as it conflicts with German and Latin. 
# Hungarian # ['ä', 'a'], # ['Ä', 'A'], # ['ö', 'o'], # ['Ö', 'O'], # ['ü', 'u'], # ['Ü', 'U'], # ['ű', 'u'], # ['Ű', 'U'], # Latvian ['ā', 'a'], ['ē', 'e'], ['ģ', 'g'], ['ī', 'i'], ['ķ', 'k'], ['ļ', 'l'], ['ņ', 'n'], ['ū', 'u'], ['Ā', 'A'], ['Ē', 'E'], ['Ģ', 'G'], ['Ī', 'I'], ['Ķ', 'K'], ['Ļ', 'L'], ['Ņ', 'N'], ['Ū', 'U'], ['č', 'c'], ['š', 's'], ['ž', 'z'], ['Č', 'C'], ['Š', 'S'], ['Ž', 'Z'], # Lithuanian ['ą', 'a'], ['č', 'c'], ['ę', 'e'], ['ė', 'e'], ['į', 'i'], ['š', 's'], ['ų', 'u'], ['ū', 'u'], ['ž', 'z'], ['Ą', 'A'], ['Č', 'C'], ['Ę', 'E'], ['Ė', 'E'], ['Į', 'I'], ['Š', 'S'], ['Ų', 'U'], ['Ū', 'U'], # Macedonian ['Ќ', 'Kj'], ['ќ', 'kj'], ['Љ', 'Lj'], ['љ', 'lj'], ['Њ', 'Nj'], ['њ', 'nj'], ['Тс', 'Ts'], ['тс', 'ts'], # Polish ['ą', 'a'], ['ć', 'c'], ['ę', 'e'], ['ł', 'l'], ['ń', 'n'], ['ś', 's'], ['ź', 'z'], ['ż', 'z'], ['Ą', 'A'], ['Ć', 'C'], ['Ę', 'E'], ['Ł', 'L'], ['Ń', 'N'], ['Ś', 'S'], ['Ź', 'Z'], ['Ż', 'Z'], # Disabled as it conflicts with Vietnamese. # Serbian # ['љ', 'lj'], # ['њ', 'nj'], # ['Љ', 'Lj'], # ['Њ', 'Nj'], # ['đ', 'dj'], # ['Đ', 'Dj'], # ['ђ', 'dj'], # ['ј', 'j'], # ['ћ', 'c'], # ['џ', 'dz'], # ['Ђ', 'Dj'], # ['Ј', 'j'], # ['Ћ', 'C'], # ['Џ', 'Dz'], # Disabled as it conflicts with German and Latin. # Slovak # ['ä', 'a'], # ['Ä', 'A'], # ['ľ', 'l'], # ['ĺ', 'l'], # ['ŕ', 'r'], # ['Ľ', 'L'], # ['Ĺ', 'L'], # ['Ŕ', 'R'], # Disabled as it conflicts with German and Latin. # Swedish # ['å', 'o'], # ['Å', 'o'], # ['ä', 'a'], # ['Ä', 'A'], # ['ë', 'e'], # ['Ë', 'E'], # ['ö', 'o'], # ['Ö', 'O'], # Ukrainian ['Є', 'Ye'], ['І', 'I'], ['Ї', 'Yi'], ['Ґ', 'G'], ['є', 'ye'], ['і', 'i'], ['ї', 'yi'], ['ґ', 'g'], # Dutch ['IJ', 'IJ'], ['ij', 'ij'], # Danish # ['Æ', 'Ae'], # ['Ø', 'Oe'], # ['Å', 'Aa'], # ['æ', 'ae'], # ['ø', 'oe'], # ['å', 'aa'] # Currencies ['¢', 'c'], ['¥', 'Y'], ['߿', 'b'], ['৳', 't'], ['૱', 'Bo'], ['฿', 'B'], ['₠', 'CE'], ['₡', 'C'], ['₢', 'Cr'], ['₣', 'F'], ['₥', 'm'], ['₦', 'N'], ['₧', 'Pt'], ['₨', 'Rs'], ['₩', 'W'], ['₫', 's'], ['€', 'E'], ['₭', 'K'], ['₮', 'T'], ['₯', 'Dp'], ['₰', 'S'], ['₱', 'P'], ['₲', 'G'], ['₳', 'A'], ['₴', 'S'], ['₵', 'C'], ['₶', 'tt'], ['₷', 'S'], ['₸', 'T'], ['₹', 'R'], ['₺', 'L'], ['₽', 'P'], ['₿', 'B'], ['﹩', '$'], ['¢', 'c'], ['¥', 'Y'], ['₩', 'W'], # Latin ['𝐀', 'A'], ['𝐁', 'B'], ['𝐂', 'C'], ['𝐃', 'D'], ['𝐄', 'E'], ['𝐅', 'F'], ['𝐆', 'G'], ['𝐇', 'H'], ['𝐈', 'I'], ['𝐉', 'J'], ['𝐊', 'K'], ['𝐋', 'L'], ['𝐌', 'M'], ['𝐍', 'N'], ['𝐎', 'O'], ['𝐏', 'P'], ['𝐐', 'Q'], ['𝐑', 'R'], ['𝐒', 'S'], ['𝐓', 'T'], ['𝐔', 'U'], ['𝐕', 'V'], ['𝐖', 'W'], ['𝐗', 'X'], ['𝐘', 'Y'], ['𝐙', 'Z'], ['𝐚', 'a'], ['𝐛', 'b'], ['𝐜', 'c'], ['𝐝', 'd'], ['𝐞', 'e'], ['𝐟', 'f'], ['𝐠', 'g'], ['𝐡', 'h'], ['𝐢', 'i'], ['𝐣', 'j'], ['𝐤', 'k'], ['𝐥', 'l'], ['𝐦', 'm'], ['𝐧', 'n'], ['𝐨', 'o'], ['𝐩', 'p'], ['𝐪', 'q'], ['𝐫', 'r'], ['𝐬', 's'], ['𝐭', 't'], ['𝐮', 'u'], ['𝐯', 'v'], ['𝐰', 'w'], ['𝐱', 'x'], ['𝐲', 'y'], ['𝐳', 'z'], ['𝐴', 'A'], ['𝐵', 'B'], ['𝐶', 'C'], ['𝐷', 'D'], ['𝐸', 'E'], ['𝐹', 'F'], ['𝐺', 'G'], ['𝐻', 'H'], ['𝐼', 'I'], ['𝐽', 'J'], ['𝐾', 'K'], ['𝐿', 'L'], ['𝑀', 'M'], ['𝑁', 'N'], ['𝑂', 'O'], ['𝑃', 'P'], ['𝑄', 'Q'], ['𝑅', 'R'], ['𝑆', 'S'], ['𝑇', 'T'], ['𝑈', 'U'], ['𝑉', 'V'], ['𝑊', 'W'], ['𝑋', 'X'], ['𝑌', 'Y'], ['𝑍', 'Z'], ['𝑎', 'a'], ['𝑏', 'b'], ['𝑐', 'c'], ['𝑑', 'd'], ['𝑒', 'e'], ['𝑓', 'f'], ['𝑔', 'g'], ['𝑖', 'i'], ['𝑗', 'j'], ['𝑘', 'k'], ['𝑙', 'l'], ['𝑚', 'm'], ['𝑛', 'n'], ['𝑜', 'o'], ['𝑝', 'p'], ['𝑞', 'q'], ['𝑟', 'r'], ['𝑠', 's'], ['𝑡', 't'], ['𝑢', 'u'], ['𝑣', 'v'], ['𝑤', 'w'], ['𝑥', 'x'], ['𝑦', 'y'], ['𝑧', 'z'], ['𝑨', 'A'], ['𝑩', 'B'], ['𝑪', 'C'], ['𝑫', 'D'], ['𝑬', 'E'], 
['𝑭', 'F'], ['𝑮', 'G'], ['𝑯', 'H'], ['𝑰', 'I'], ['𝑱', 'J'], ['𝑲', 'K'], ['𝑳', 'L'], ['𝑴', 'M'], ['𝑵', 'N'], ['𝑶', 'O'], ['𝑷', 'P'], ['𝑸', 'Q'], ['𝑹', 'R'], ['𝑺', 'S'], ['𝑻', 'T'], ['𝑼', 'U'], ['𝑽', 'V'], ['𝑾', 'W'], ['𝑿', 'X'], ['𝒀', 'Y'], ['𝒁', 'Z'], ['𝒂', 'a'], ['𝒃', 'b'], ['𝒄', 'c'], ['𝒅', 'd'], ['𝒆', 'e'], ['𝒇', 'f'], ['𝒈', 'g'], ['𝒉', 'h'], ['𝒊', 'i'], ['𝒋', 'j'], ['𝒌', 'k'], ['𝒍', 'l'], ['𝒎', 'm'], ['𝒏', 'n'], ['𝒐', 'o'], ['𝒑', 'p'], ['𝒒', 'q'], ['𝒓', 'r'], ['𝒔', 's'], ['𝒕', 't'], ['𝒖', 'u'], ['𝒗', 'v'], ['𝒘', 'w'], ['𝒙', 'x'], ['𝒚', 'y'], ['𝒛', 'z'], ['𝒜', 'A'], ['𝒞', 'C'], ['𝒟', 'D'], ['𝒢', 'g'], ['𝒥', 'J'], ['𝒦', 'K'], ['𝒩', 'N'], ['𝒪', 'O'], ['𝒫', 'P'], ['𝒬', 'Q'], ['𝒮', 'S'], ['𝒯', 'T'], ['𝒰', 'U'], ['𝒱', 'V'], ['𝒲', 'W'], ['𝒳', 'X'], ['𝒴', 'Y'], ['𝒵', 'Z'], ['𝒶', 'a'], ['𝒷', 'b'], ['𝒸', 'c'], ['𝒹', 'd'], ['𝒻', 'f'], ['𝒽', 'h'], ['𝒾', 'i'], ['𝒿', 'j'], ['𝓀', 'h'], ['𝓁', 'l'], ['𝓂', 'm'], ['𝓃', 'n'], ['𝓅', 'p'], ['𝓆', 'q'], ['𝓇', 'r'], ['𝓈', 's'], ['𝓉', 't'], ['𝓊', 'u'], ['𝓋', 'v'], ['𝓌', 'w'], ['𝓍', 'x'], ['𝓎', 'y'], ['𝓏', 'z'], ['𝓐', 'A'], ['𝓑', 'B'], ['𝓒', 'C'], ['𝓓', 'D'], ['𝓔', 'E'], ['𝓕', 'F'], ['𝓖', 'G'], ['𝓗', 'H'], ['𝓘', 'I'], ['𝓙', 'J'], ['𝓚', 'K'], ['𝓛', 'L'], ['𝓜', 'M'], ['𝓝', 'N'], ['𝓞', 'O'], ['𝓟', 'P'], ['𝓠', 'Q'], ['𝓡', 'R'], ['𝓢', 'S'], ['𝓣', 'T'], ['𝓤', 'U'], ['𝓥', 'V'], ['𝓦', 'W'], ['𝓧', 'X'], ['𝓨', 'Y'], ['𝓩', 'Z'], ['𝓪', 'a'], ['𝓫', 'b'], ['𝓬', 'c'], ['𝓭', 'd'], ['𝓮', 'e'], ['𝓯', 'f'], ['𝓰', 'g'], ['𝓱', 'h'], ['𝓲', 'i'], ['𝓳', 'j'], ['𝓴', 'k'], ['𝓵', 'l'], ['𝓶', 'm'], ['𝓷', 'n'], ['𝓸', 'o'], ['𝓹', 'p'], ['𝓺', 'q'], ['𝓻', 'r'], ['𝓼', 's'], ['𝓽', 't'], ['𝓾', 'u'], ['𝓿', 'v'], ['𝔀', 'w'], ['𝔁', 'x'], ['𝔂', 'y'], ['𝔃', 'z'], ['𝔄', 'A'], ['𝔅', 'B'], ['𝔇', 'D'], ['𝔈', 'E'], ['𝔉', 'F'], ['𝔊', 'G'], ['𝔍', 'J'], ['𝔎', 'K'], ['𝔏', 'L'], ['𝔐', 'M'], ['𝔑', 'N'], ['𝔒', 'O'], ['𝔓', 'P'], ['𝔔', 'Q'], ['𝔖', 'S'], ['𝔗', 'T'], ['𝔘', 'U'], ['𝔙', 'V'], ['𝔚', 'W'], ['𝔛', 'X'], ['𝔜', 'Y'], ['𝔞', 'a'], ['𝔟', 'b'], ['𝔠', 'c'], ['𝔡', 'd'], ['𝔢', 'e'], ['𝔣', 'f'], ['𝔤', 'g'], ['𝔥', 'h'], ['𝔦', 'i'], ['𝔧', 'j'], ['𝔨', 'k'], ['𝔩', 'l'], ['𝔪', 'm'], ['𝔫', 'n'], ['𝔬', 'o'], ['𝔭', 'p'], ['𝔮', 'q'], ['𝔯', 'r'], ['𝔰', 's'], ['𝔱', 't'], ['𝔲', 'u'], ['𝔳', 'v'], ['𝔴', 'w'], ['𝔵', 'x'], ['𝔶', 'y'], ['𝔷', 'z'], ['𝔸', 'A'], ['𝔹', 'B'], ['𝔻', 'D'], ['𝔼', 'E'], ['𝔽', 'F'], ['𝔾', 'G'], ['𝕀', 'I'], ['𝕁', 'J'], ['𝕂', 'K'], ['𝕃', 'L'], ['𝕄', 'M'], ['𝕆', 'N'], ['𝕊', 'S'], ['𝕋', 'T'], ['𝕌', 'U'], ['𝕍', 'V'], ['𝕎', 'W'], ['𝕏', 'X'], ['𝕐', 'Y'], ['𝕒', 'a'], ['𝕓', 'b'], ['𝕔', 'c'], ['𝕕', 'd'], ['𝕖', 'e'], ['𝕗', 'f'], ['𝕘', 'g'], ['𝕙', 'h'], ['𝕚', 'i'], ['𝕛', 'j'], ['𝕜', 'k'], ['𝕝', 'l'], ['𝕞', 'm'], ['𝕟', 'n'], ['𝕠', 'o'], ['𝕡', 'p'], ['𝕢', 'q'], ['𝕣', 'r'], ['𝕤', 's'], ['𝕥', 't'], ['𝕦', 'u'], ['𝕧', 'v'], ['𝕨', 'w'], ['𝕩', 'x'], ['𝕪', 'y'], ['𝕫', 'z'], ['𝕬', 'A'], ['𝕭', 'B'], ['𝕮', 'C'], ['𝕯', 'D'], ['𝕰', 'E'], ['𝕱', 'F'], ['𝕲', 'G'], ['𝕳', 'H'], ['𝕴', 'I'], ['𝕵', 'J'], ['𝕶', 'K'], ['𝕷', 'L'], ['𝕸', 'M'], ['𝕹', 'N'], ['𝕺', 'O'], ['𝕻', 'P'], ['𝕼', 'Q'], ['𝕽', 'R'], ['𝕾', 'S'], ['𝕿', 'T'], ['𝖀', 'U'], ['𝖁', 'V'], ['𝖂', 'W'], ['𝖃', 'X'], ['𝖄', 'Y'], ['𝖅', 'Z'], ['𝖆', 'a'], ['𝖇', 'b'], ['𝖈', 'c'], ['𝖉', 'd'], ['𝖊', 'e'], ['𝖋', 'f'], ['𝖌', 'g'], ['𝖍', 'h'], ['𝖎', 'i'], ['𝖏', 'j'], ['𝖐', 'k'], ['𝖑', 'l'], ['𝖒', 'm'], ['𝖓', 'n'], ['𝖔', 'o'], ['𝖕', 'p'], ['𝖖', 'q'], ['𝖗', 'r'], ['𝖘', 's'], ['𝖙', 't'], ['𝖚', 'u'], ['𝖛', 'v'], ['𝖜', 'w'], ['𝖝', 'x'], ['𝖞', 'y'], ['𝖟', 'z'], ['𝖠', 'A'], ['𝖡', 'B'], ['𝖢', 'C'], ['𝖣', 'D'], ['𝖤', 'E'], ['𝖥', 'F'], ['𝖦', 'G'], ['𝖧', 'H'], ['𝖨', 'I'], ['𝖩', 'J'], ['𝖪', 'K'], ['𝖫', 'L'], 
['𝖬', 'M'], ['𝖭', 'N'], ['𝖮', 'O'], ['𝖯', 'P'], ['𝖰', 'Q'], ['𝖱', 'R'], ['𝖲', 'S'], ['𝖳', 'T'], ['𝖴', 'U'], ['𝖵', 'V'], ['𝖶', 'W'], ['𝖷', 'X'], ['𝖸', 'Y'], ['𝖹', 'Z'], ['𝖺', 'a'], ['𝖻', 'b'], ['𝖼', 'c'], ['𝖽', 'd'], ['𝖾', 'e'], ['𝖿', 'f'], ['𝗀', 'g'], ['𝗁', 'h'], ['𝗂', 'i'], ['𝗃', 'j'], ['𝗄', 'k'], ['𝗅', 'l'], ['𝗆', 'm'], ['𝗇', 'n'], ['𝗈', 'o'], ['𝗉', 'p'], ['𝗊', 'q'], ['𝗋', 'r'], ['𝗌', 's'], ['𝗍', 't'], ['𝗎', 'u'], ['𝗏', 'v'], ['𝗐', 'w'], ['𝗑', 'x'], ['𝗒', 'y'], ['𝗓', 'z'], ['𝗔', 'A'], ['𝗕', 'B'], ['𝗖', 'C'], ['𝗗', 'D'], ['𝗘', 'E'], ['𝗙', 'F'], ['𝗚', 'G'], ['𝗛', 'H'], ['𝗜', 'I'], ['𝗝', 'J'], ['𝗞', 'K'], ['𝗟', 'L'], ['𝗠', 'M'], ['𝗡', 'N'], ['𝗢', 'O'], ['𝗣', 'P'], ['𝗤', 'Q'], ['𝗥', 'R'], ['𝗦', 'S'], ['𝗧', 'T'], ['𝗨', 'U'], ['𝗩', 'V'], ['𝗪', 'W'], ['𝗫', 'X'], ['𝗬', 'Y'], ['𝗭', 'Z'], ['𝗮', 'a'], ['𝗯', 'b'], ['𝗰', 'c'], ['𝗱', 'd'], ['𝗲', 'e'], ['𝗳', 'f'], ['𝗴', 'g'], ['𝗵', 'h'], ['𝗶', 'i'], ['𝗷', 'j'], ['𝗸', 'k'], ['𝗹', 'l'], ['𝗺', 'm'], ['𝗻', 'n'], ['𝗼', 'o'], ['𝗽', 'p'], ['𝗾', 'q'], ['𝗿', 'r'], ['𝘀', 's'], ['𝘁', 't'], ['𝘂', 'u'], ['𝘃', 'v'], ['𝘄', 'w'], ['𝘅', 'x'], ['𝘆', 'y'], ['𝘇', 'z'], ['𝘈', 'A'], ['𝘉', 'B'], ['𝘊', 'C'], ['𝘋', 'D'], ['𝘌', 'E'], ['𝘍', 'F'], ['𝘎', 'G'], ['𝘏', 'H'], ['𝘐', 'I'], ['𝘑', 'J'], ['𝘒', 'K'], ['𝘓', 'L'], ['𝘔', 'M'], ['𝘕', 'N'], ['𝘖', 'O'], ['𝘗', 'P'], ['𝘘', 'Q'], ['𝘙', 'R'], ['𝘚', 'S'], ['𝘛', 'T'], ['𝘜', 'U'], ['𝘝', 'V'], ['𝘞', 'W'], ['𝘟', 'X'], ['𝘠', 'Y'], ['𝘡', 'Z'], ['𝘢', 'a'], ['𝘣', 'b'], ['𝘤', 'c'], ['𝘥', 'd'], ['𝘦', 'e'], ['𝘧', 'f'], ['𝘨', 'g'], ['𝘩', 'h'], ['𝘪', 'i'], ['𝘫', 'j'], ['𝘬', 'k'], ['𝘭', 'l'], ['𝘮', 'm'], ['𝘯', 'n'], ['𝘰', 'o'], ['𝘱', 'p'], ['𝘲', 'q'], ['𝘳', 'r'], ['𝘴', 's'], ['𝘵', 't'], ['𝘶', 'u'], ['𝘷', 'v'], ['𝘸', 'w'], ['𝘹', 'x'], ['𝘺', 'y'], ['𝘻', 'z'], ['𝘼', 'A'], ['𝘽', 'B'], ['𝘾', 'C'], ['𝘿', 'D'], ['𝙀', 'E'], ['𝙁', 'F'], ['𝙂', 'G'], ['𝙃', 'H'], ['𝙄', 'I'], ['𝙅', 'J'], ['𝙆', 'K'], ['𝙇', 'L'], ['𝙈', 'M'], ['𝙉', 'N'], ['𝙊', 'O'], ['𝙋', 'P'], ['𝙌', 'Q'], ['𝙍', 'R'], ['𝙎', 'S'], ['𝙏', 'T'], ['𝙐', 'U'], ['𝙑', 'V'], ['𝙒', 'W'], ['𝙓', 'X'], ['𝙔', 'Y'], ['𝙕', 'Z'], ['𝙖', 'a'], ['𝙗', 'b'], ['𝙘', 'c'], ['𝙙', 'd'], ['𝙚', 'e'], ['𝙛', 'f'], ['𝙜', 'g'], ['𝙝', 'h'], ['𝙞', 'i'], ['𝙟', 'j'], ['𝙠', 'k'], ['𝙡', 'l'], ['𝙢', 'm'], ['𝙣', 'n'], ['𝙤', 'o'], ['𝙥', 'p'], ['𝙦', 'q'], ['𝙧', 'r'], ['𝙨', 's'], ['𝙩', 't'], ['𝙪', 'u'], ['𝙫', 'v'], ['𝙬', 'w'], ['𝙭', 'x'], ['𝙮', 'y'], ['𝙯', 'z'], ['𝙰', 'A'], ['𝙱', 'B'], ['𝙲', 'C'], ['𝙳', 'D'], ['𝙴', 'E'], ['𝙵', 'F'], ['𝙶', 'G'], ['𝙷', 'H'], ['𝙸', 'I'], ['𝙹', 'J'], ['𝙺', 'K'], ['𝙻', 'L'], ['𝙼', 'M'], ['𝙽', 'N'], ['𝙾', 'O'], ['𝙿', 'P'], ['𝚀', 'Q'], ['𝚁', 'R'], ['𝚂', 'S'], ['𝚃', 'T'], ['𝚄', 'U'], ['𝚅', 'V'], ['𝚆', 'W'], ['𝚇', 'X'], ['𝚈', 'Y'], ['𝚉', 'Z'], ['𝚊', 'a'], ['𝚋', 'b'], ['𝚌', 'c'], ['𝚍', 'd'], ['𝚎', 'e'], ['𝚏', 'f'], ['𝚐', 'g'], ['𝚑', 'h'], ['𝚒', 'i'], ['𝚓', 'j'], ['𝚔', 'k'], ['𝚕', 'l'], ['𝚖', 'm'], ['𝚗', 'n'], ['𝚘', 'o'], ['𝚙', 'p'], ['𝚚', 'q'], ['𝚛', 'r'], ['𝚜', 's'], ['𝚝', 't'], ['𝚞', 'u'], ['𝚟', 'v'], ['𝚠', 'w'], ['𝚡', 'x'], ['𝚢', 'y'], ['𝚣', 'z'], # Dotless letters ['𝚤', 'l'], ['𝚥', 'j'], # Greek ['𝛢', 'A'], ['𝛣', 'B'], ['𝛤', 'G'], ['𝛥', 'D'], ['𝛦', 'E'], ['𝛧', 'Z'], ['𝛨', 'I'], ['𝛩', 'TH'], ['𝛪', 'I'], ['𝛫', 'K'], ['𝛬', 'L'], ['𝛭', 'M'], ['𝛮', 'N'], ['𝛯', 'KS'], ['𝛰', 'O'], ['𝛱', 'P'], ['𝛲', 'R'], ['𝛳', 'TH'], ['𝛴', 'S'], ['𝛵', 'T'], ['𝛶', 'Y'], ['𝛷', 'F'], ['𝛸', 'x'], ['𝛹', 'PS'], ['𝛺', 'O'], ['𝛻', 'D'], ['𝛼', 'a'], ['𝛽', 'b'], ['𝛾', 'g'], ['𝛿', 'd'], ['𝜀', 'e'], ['𝜁', 'z'], ['𝜂', 'i'], ['𝜃', 'th'], ['𝜄', 'i'], ['𝜅', 'k'], ['𝜆', 'l'], ['𝜇', 'm'], ['𝜈', 'n'], ['𝜉', 'ks'], ['𝜊', 'o'], ['𝜋', 'p'], ['𝜌', 'r'], ['𝜍', 
's'], ['𝜎', 's'], ['𝜏', 't'], ['𝜐', 'y'], ['𝜑', 'f'], ['𝜒', 'x'], ['𝜓', 'ps'], ['𝜔', 'o'], ['𝜕', 'd'], ['𝜖', 'E'], ['𝜗', 'TH'], ['𝜘', 'K'], ['𝜙', 'f'], ['𝜚', 'r'], ['𝜛', 'p'], ['𝜜', 'A'], ['𝜝', 'V'], ['𝜞', 'G'], ['𝜟', 'D'], ['𝜠', 'E'], ['𝜡', 'Z'], ['𝜢', 'I'], ['𝜣', 'TH'], ['𝜤', 'I'], ['𝜥', 'K'], ['𝜦', 'L'], ['𝜧', 'M'], ['𝜨', 'N'], ['𝜩', 'KS'], ['𝜪', 'O'], ['𝜫', 'P'], ['𝜬', 'S'], ['𝜭', 'TH'], ['𝜮', 'S'], ['𝜯', 'T'], ['𝜰', 'Y'], ['𝜱', 'F'], ['𝜲', 'X'], ['𝜳', 'PS'], ['𝜴', 'O'], ['𝜵', 'D'], ['𝜶', 'a'], ['𝜷', 'v'], ['𝜸', 'g'], ['𝜹', 'd'], ['𝜺', 'e'], ['𝜻', 'z'], ['𝜼', 'i'], ['𝜽', 'th'], ['𝜾', 'i'], ['𝜿', 'k'], ['𝝀', 'l'], ['𝝁', 'm'], ['𝝂', 'n'], ['𝝃', 'ks'], ['𝝄', 'o'], ['𝝅', 'p'], ['𝝆', 'r'], ['𝝇', 's'], ['𝝈', 's'], ['𝝉', 't'], ['𝝊', 'y'], ['𝝋', 'f'], ['𝝌', 'x'], ['𝝍', 'ps'], ['𝝎', 'o'], ['𝝏', 'a'], ['𝝐', 'e'], ['𝝑', 'i'], ['𝝒', 'k'], ['𝝓', 'f'], ['𝝔', 'r'], ['𝝕', 'p'], ['𝝖', 'A'], ['𝝗', 'B'], ['𝝘', 'G'], ['𝝙', 'D'], ['𝝚', 'E'], ['𝝛', 'Z'], ['𝝜', 'I'], ['𝝝', 'TH'], ['𝝞', 'I'], ['𝝟', 'K'], ['𝝠', 'L'], ['𝝡', 'M'], ['𝝢', 'N'], ['𝝣', 'KS'], ['𝝤', 'O'], ['𝝥', 'P'], ['𝝦', 'R'], ['𝝧', 'TH'], ['𝝨', 'S'], ['𝝩', 'T'], ['𝝪', 'Y'], ['𝝫', 'F'], ['𝝬', 'X'], ['𝝭', 'PS'], ['𝝮', 'O'], ['𝝯', 'D'], ['𝝰', 'a'], ['𝝱', 'v'], ['𝝲', 'g'], ['𝝳', 'd'], ['𝝴', 'e'], ['𝝵', 'z'], ['𝝶', 'i'], ['𝝷', 'th'], ['𝝸', 'i'], ['𝝹', 'k'], ['𝝺', 'l'], ['𝝻', 'm'], ['𝝼', 'n'], ['𝝽', 'ks'], ['𝝾', 'o'], ['𝝿', 'p'], ['𝞀', 'r'], ['𝞁', 's'], ['𝞂', 's'], ['𝞃', 't'], ['𝞄', 'y'], ['𝞅', 'f'], ['𝞆', 'x'], ['𝞇', 'ps'], ['𝞈', 'o'], ['𝞉', 'a'], ['𝞊', 'e'], ['𝞋', 'i'], ['𝞌', 'k'], ['𝞍', 'f'], ['𝞎', 'r'], ['𝞏', 'p'], ['𝞐', 'A'], ['𝞑', 'V'], ['𝞒', 'G'], ['𝞓', 'D'], ['𝞔', 'E'], ['𝞕', 'Z'], ['𝞖', 'I'], ['𝞗', 'TH'], ['𝞘', 'I'], ['𝞙', 'K'], ['𝞚', 'L'], ['𝞛', 'M'], ['𝞜', 'N'], ['𝞝', 'KS'], ['𝞞', 'O'], ['𝞟', 'P'], ['𝞠', 'S'], ['𝞡', 'TH'], ['𝞢', 'S'], ['𝞣', 'T'], ['𝞤', 'Y'], ['𝞥', 'F'], ['𝞦', 'X'], ['𝞧', 'PS'], ['𝞨', 'O'], ['𝞩', 'D'], ['𝞪', 'av'], ['𝞫', 'g'], ['𝞬', 'd'], ['𝞭', 'e'], ['𝞮', 'z'], ['𝞯', 'i'], ['𝞰', 'i'], ['𝞱', 'th'], ['𝞲', 'i'], ['𝞳', 'k'], ['𝞴', 'l'], ['𝞵', 'm'], ['𝞶', 'n'], ['𝞷', 'ks'], ['𝞸', 'o'], ['𝞹', 'p'], ['𝞺', 'r'], ['𝞻', 's'], ['𝞼', 's'], ['𝞽', 't'], ['𝞾', 'y'], ['𝞿', 'f'], ['𝟀', 'x'], ['𝟁', 'ps'], ['𝟂', 'o'], ['𝟃', 'a'], ['𝟄', 'e'], ['𝟅', 'i'], ['𝟆', 'k'], ['𝟇', 'f'], ['𝟈', 'r'], ['𝟉', 'p'], ['𝟊', 'F'], ['𝟋', 'f'], ['⒜', '(a)'], ['⒝', '(b)'], ['⒞', '(c)'], ['⒟', '(d)'], ['⒠', '(e)'], ['⒡', '(f)'], ['⒢', '(g)'], ['⒣', '(h)'], ['⒤', '(i)'], ['⒥', '(j)'], ['⒦', '(k)'], ['⒧', '(l)'], ['⒨', '(m)'], ['⒩', '(n)'], ['⒪', '(o)'], ['⒫', '(p)'], ['⒬', '(q)'], ['⒭', '(r)'], ['⒮', '(s)'], ['⒯', '(t)'], ['⒰', '(u)'], ['⒱', '(v)'], ['⒲', '(w)'], ['⒳', '(x)'], ['⒴', '(y)'], ['⒵', '(z)'], ['Ⓐ', '(A)'], ['Ⓑ', '(B)'], ['Ⓒ', '(C)'], ['Ⓓ', '(D)'], ['Ⓔ', '(E)'], ['Ⓕ', '(F)'], ['Ⓖ', '(G)'], ['Ⓗ', '(H)'], ['Ⓘ', '(I)'], ['Ⓙ', '(J)'], ['Ⓚ', '(K)'], ['Ⓛ', '(L)'], ['Ⓝ', '(N)'], ['Ⓞ', '(O)'], ['Ⓟ', '(P)'], ['Ⓠ', '(Q)'], ['Ⓡ', '(R)'], ['Ⓢ', '(S)'], ['Ⓣ', '(T)'], ['Ⓤ', '(U)'], ['Ⓥ', '(V)'], ['Ⓦ', '(W)'], ['Ⓧ', '(X)'], ['Ⓨ', '(Y)'], ['Ⓩ', '(Z)'], ['ⓐ', '(a)'], ['ⓑ', '(b)'], ['ⓒ', '(b)'], ['ⓓ', '(c)'], ['ⓔ', '(e)'], ['ⓕ', '(f)'], ['ⓖ', '(g)'], ['ⓗ', '(h)'], ['ⓘ', '(i)'], ['ⓙ', '(j)'], ['ⓚ', '(k)'], ['ⓛ', '(l)'], ['ⓜ', '(m)'], ['ⓝ', '(n)'], ['ⓞ', '(o)'], ['ⓟ', '(p)'], ['ⓠ', '(q)'], ['ⓡ', '(r)'], ['ⓢ', '(s)'], ['ⓣ', '(t)'], ['ⓤ', '(u)'], ['ⓥ', '(v)'], ['ⓦ', '(w)'], ['ⓧ', '(x)'], ['ⓨ', '(y)'], ['ⓩ', '(z)'], # Numbers ['𝟎', '0'], ['𝟏', '1'], ['𝟐', '2'], ['𝟑', '3'], ['𝟒', '4'], ['𝟓', '5'], ['𝟔', '6'], ['𝟕', '7'], ['𝟖', '8'], ['𝟗', '9'], ['𝟘', '0'], ['𝟙', '1'], ['𝟚', '2'], 
['𝟛', '3'], ['𝟜', '4'], ['𝟝', '5'], ['𝟞', '6'], ['𝟟', '7'], ['𝟠', '8'], ['𝟡', '9'], ['𝟢', '0'], ['𝟣', '1'], ['𝟤', '2'], ['𝟥', '3'], ['𝟦', '4'], ['𝟧', '5'], ['𝟨', '6'], ['𝟩', '7'], ['𝟪', '8'], ['𝟫', '9'], ['𝟬', '0'], ['𝟭', '1'], ['𝟮', '2'], ['𝟯', '3'], ['𝟰', '4'], ['𝟱', '5'], ['𝟲', '6'], ['𝟳', '7'], ['𝟴', '8'], ['𝟵', '9'], ['𝟶', '0'], ['𝟷', '1'], ['𝟸', '2'], ['𝟹', '3'], ['𝟺', '4'], ['𝟻', '5'], ['𝟼', '6'], ['𝟽', '7'], ['𝟾', '8'], ['𝟿', '9'], ['①', '1'], ['②', '2'], ['③', '3'], ['④', '4'], ['⑤', '5'], ['⑥', '6'], ['⑦', '7'], ['⑧', '8'], ['⑨', '9'], ['⑩', '10'], ['⑪', '11'], ['⑫', '12'], ['⑬', '13'], ['⑭', '14'], ['⑮', '15'], ['⑯', '16'], ['⑰', '17'], ['⑱', '18'], ['⑲', '19'], ['⑳', '20'], ['⑴', '1'], ['⑵', '2'], ['⑶', '3'], ['⑷', '4'], ['⑸', '5'], ['⑹', '6'], ['⑺', '7'], ['⑻', '8'], ['⑼', '9'], ['⑽', '10'], ['⑾', '11'], ['⑿', '12'], ['⒀', '13'], ['⒁', '14'], ['⒂', '15'], ['⒃', '16'], ['⒄', '17'], ['⒅', '18'], ['⒆', '19'], ['⒇', '20'], ['⒈', '1.'], ['⒉', '2.'], ['⒊', '3.'], ['⒋', '4.'], ['⒌', '5.'], ['⒍', '6.'], ['⒎', '7.'], ['⒏', '8.'], ['⒐', '9.'], ['⒑', '10.'], ['⒒', '11.'], ['⒓', '12.'], ['⒔', '13.'], ['⒕', '14.'], ['⒖', '15.'], ['⒗', '16.'], ['⒘', '17.'], ['⒙', '18.'], ['⒚', '19.'], ['⒛', '20.'], ['⓪', '0'], ['⓫', '11'], ['⓬', '12'], ['⓭', '13'], ['⓮', '14'], ['⓯', '15'], ['⓰', '16'], ['⓱', '17'], ['⓲', '18'], ['⓳', '19'], ['⓴', '20'], ['⓵', '1'], ['⓶', '2'], ['⓷', '3'], ['⓸', '4'], ['⓹', '5'], ['⓺', '6'], ['⓻', '7'], ['⓼', '8'], ['⓽', '9'], ['⓾', '10'], ['⓿', '0'], # Punctuation ['🙰', '&'], ['🙱', '&'], ['🙲', '&'], ['🙳', '&'], ['🙴', '&'], ['🙵', '&'], ['🙶', '"'], ['🙷', '"'], ['🙸', '"'], ['‽', '?!'], ['🙹', '?!'], ['🙺', '?!'], ['🙻', '?!'], ['🙼', '/'], ['🙽', '\\'], # Alchemy ['🜇', 'AR'], ['🜈', 'V'], ['🜉', 'V'], ['🜆', 'VR'], ['🜅', 'VF'], ['🜩', '2'], ['🜪', '5'], ['🝡', 'f'], ['🝢', 'W'], ['🝣', 'U'], ['🝧', 'V'], ['🝨', 'T'], ['🝪', 'V'], ['🝫', 'MB'], ['🝬', 'VB'], ['🝲', '3B'], ['🝳', '3B'], # Emojis ['💯', '100'], ['🔙', 'BACK'], ['🔚', 'END'], ['🔛', 'ON!'], ['🔜', 'SOON'], ['🔝', 'TOP'], ['🔞', '18'], ['🔤', 'abc'], ['🔠', 'ABCD'], ['🔡', 'abcd'], ['🔢', '1234'], ['🔣', 'T&@%'], ['#️⃣', '#'], ['*️⃣', '*'], ['0️⃣', '0'], ['1️⃣', '1'], ['2️⃣', '2'], ['3️⃣', '3'], ['4️⃣', '4'], ['5️⃣', '5'], ['6️⃣', '6'], ['7️⃣', '7'], ['8️⃣', '8'], ['9️⃣', '9'], ['🔟', '10'], ['🅰️', 'A'], ['🅱️', 'B'], ['🆎', 'AB'], ['🆑', 'CL'], ['🅾️', 'O'], ['🅿', 'P'], ['🆘', 'SOS'], ['🅲', 'C'], ['🅳', 'D'], ['🅴', 'E'], ['🅵', 'F'], ['🅶', 'G'], ['🅷', 'H'], ['🅸', 'I'], ['🅹', 'J'], ['🅺', 'K'], ['🅻', 'L'], ['🅼', 'M'], ['🅽', 'N'], ['🆀', 'Q'], ['🆁', 'R'], ['🆂', 'S'], ['🆃', 'T'], ['🆄', 'U'], ['🆅', 'V'], ['🆆', 'W'], ['🆇', 'X'], ['🆈', 'Y'], ['🆉', 'Z'], ]
TensorFlow2/Recommendation/DLRM_and_DCNv2/nn
nn
__init__
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # author: Tomasz Grel (tgrel@nvidia.com)
PyTorch/Detection/Efficientdet/effdet/object_detection
object_detection
box_list
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Bounding Box List definition. BoxList represents a list of bounding boxes as tensorflow tensors, where each bounding box is represented as a row of 4 numbers, [y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes within a given list correspond to a single image. See also box_list_ops.py for common box related operations (such as area, iou, etc). Optionally, users can add additional related fields (such as weights). We assume the following things to be true about fields: * they correspond to boxes in the box_list along the 0th dimension * they have inferable rank at graph construction time * all dimensions except for possibly the 0th can be inferred (i.e., not None) at graph construction time. Some other notes: * Following tensorflow conventions, we use height, width ordering, and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering * Tensors are always provided as (flat) [N, 4] tensors. """ import torch from typing import Optional, List, Dict @torch.jit.script class BoxList(object): """Box collection.""" data: Dict[str, torch.Tensor] def __init__(self, boxes): """Constructs box collection. Args: boxes: a tensor of shape [N, 4] representing box corners Raises: ValueError: if invalid dimensions for bbox data or if bbox data is not in float32 format. """ if len(boxes.shape) != 2 or boxes.shape[-1] != 4: raise ValueError('Invalid dimensions for box data.') if boxes.dtype != torch.float32: raise ValueError('Invalid tensor type: should be tf.float32') self.data = {'boxes': boxes} def num_boxes(self): """Returns number of boxes held in collection. Returns: a tensor representing the number of boxes held in the collection. """ return self.data['boxes'].shape[0] def get_all_fields(self): """Returns all fields.""" return self.data.keys() def get_extra_fields(self): """Returns all non-box fields (i.e., everything not named 'boxes').""" # return [k for k in self.data.keys() if k != 'boxes'] # FIXME torscript doesn't support comprehensions yet extra: List[str] = [] for k in self.data.keys(): if k != 'boxes': extra.append(k) return extra def add_field(self, field: str, field_data: torch.Tensor): """Add field to box list. This method can be used to add related box data such as weights/labels, etc. 
Args: field: a string key to access the data via `get` field_data: a tensor containing the data to store in the BoxList """ self.data[field] = field_data def has_field(self, field: str): return field in self.data #@property # FIXME for torchscript compat def boxes(self): """Convenience function for accessing box coordinates. Returns: a tensor with shape [N, 4] representing box coordinates. """ return self.get_field('boxes') #@boxes.setter # FIXME for torchscript compat def set_boxes(self, boxes): """Convenience function for setting box coordinates. Args: boxes: a tensor of shape [N, 4] representing box corners Raises: ValueError: if invalid dimensions for bbox data """ if len(boxes.shape) != 2 or boxes.shape[-1] != 4: raise ValueError('Invalid dimensions for box data.') self.data['boxes'] = boxes def get_field(self, field: str): """Accesses a box collection and associated fields. This function returns specified field with object; if no field is specified, it returns the box coordinates. Args: field: this optional string parameter can be used to specify a related field to be accessed. Returns: a tensor representing the box collection or an associated field. Raises: ValueError: if invalid field """ if not self.has_field(field): raise ValueError('field ' + str(field) + ' does not exist') return self.data[field] def set_field(self, field: str, value: torch.Tensor): """Sets the value of a field. Updates the field of a box_list with a given value. Args: field: (string) name of the field to set value. value: the value to assign to the field. Raises: ValueError: if the box_list does not have specified field. """ if not self.has_field(field): raise ValueError('field ' + str(field) + ' does not exist') self.data[field] = value def get_center_coordinates_and_sizes(self): """Computes the center coordinates, height and width of the boxes. Returns: a list of 4 1-D tensors [ycenter, xcenter, height, width]. """ box_corners = self.boxes() ymin, xmin, ymax, xmax = box_corners.t().unbind() width = xmax - xmin height = ymax - ymin ycenter = ymin + height / 2. xcenter = xmin + width / 2. return [ycenter, xcenter, height, width] def transpose_coordinates(self): """Transpose the coordinate representation in a boxlist. """ y_min, x_min, y_max, x_max = self.boxes().chunk(4, dim=1) self.set_boxes(torch.cat([x_min, y_min, x_max, y_max], 1)) def as_tensor_dict(self, fields: Optional[List[str]] = None): """Retrieves specified fields as a dictionary of tensors. Args: fields: (optional) list of fields to return in the dictionary. If None (default), all fields are returned. Returns: tensor_dict: A dictionary of tensors specified by fields. Raises: ValueError: if specified field is not contained in boxlist. """ tensor_dict = {} if fields is None: fields = self.get_all_fields() for field in fields: if not self.has_field(field): raise ValueError('boxlist must contain all specified fields') tensor_dict[field] = self.get_field(field) return tensor_dict #@property def device(self): return self.data['boxes'].device
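For illustration, a minimal usage sketch (not part of the repository) of the `BoxList` container defined above; the example boxes and the `scores` field name are arbitrary:

```python
import torch

# Boxes follow the [ymin, xmin, ymax, xmax] convention described in the docstring.
boxes = torch.tensor([[0., 0., 10., 10.],
                      [5., 5., 20., 25.]], dtype=torch.float32)
box_list = BoxList(boxes)
box_list.add_field('scores', torch.tensor([0.9, 0.75]))

ycenter, xcenter, height, width = box_list.get_center_coordinates_and_sizes()
print(box_list.num_boxes(), box_list.get_extra_fields())  # 2 ['scores']
```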
PyTorch/LanguageModeling/BERT/processors
processors
glue
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
import os
import sys


class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example.
            text_a: string. The untokenized text of the first sequence. For single
                sequence tasks, only this sequence must be specified.
            text_b: (Optional) string. The untokenized text of the second sequence.
                Only must be specified for sequence pair tasks.
            label: (Optional) string. The label of the example. This should be
                specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_id = label_id


class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Reads a tab separated value file."""
        with open(input_file, "r") as f:
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
            lines = []
            for line in reader:
                if sys.version_info[0] == 2:
                    line = list(unicode(cell, 'utf-8') for cell in line)
                lines.append(line)
            return lines


class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")),
            "train",
        )

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")),
            "dev",
        )

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = line[3]
            text_b = line[4]
            label = line[0]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")),
            "train",
        )

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
            "dev_matched",
        )

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[8]
            text_b = line[9]
            label = line[-1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
        return examples


class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")),
            "train",
        )

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")),
            "dev",
        )

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            guid = "%s-%s" % (set_type, i)
            text_a = line[3]
            label = line[1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples


class Sst2Processor(DataProcessor):
    """Processor for the SST-2 data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")),
            "train",
        )

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")),
            "dev",
        )

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, i)
            text_a = line[0]
            label = line[1]
            examples.append(
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples


def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s."""

    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)

        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0     0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)

        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        label_id = label_map[example.label]

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id))
    return features, label_map


def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncates a sequence pair in place to the maximum length."""

    # This is a simple heuristic which will always truncate the longer sequence
    # one token at a time. This makes more sense than truncating an equal percent
    # of tokens from each, since if one sequence is very short then each token
    # that's truncated likely contains more information than a longer sequence.
    while True:
        total_length = len(tokens_a) + len(tokens_b)
        if total_length <= max_length:
            break
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()


PROCESSORS = {
    "cola": ColaProcessor,
    "mnli": MnliProcessor,
    "mrpc": MrpcProcessor,
    "sst-2": Sst2Processor,
}
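A minimal end-to-end sketch, not part of the original module: it feeds one hand-written example through convert_examples_to_features using a toy whitespace tokenizer as a stand-in for the repository's BERT tokenizer. The tokenizer class, example text, and sequence length below are illustrative assumptions.

class WhitespaceTokenizer:
    """Toy stand-in for the real WordPiece tokenizer, only for this sketch."""

    def tokenize(self, text):
        return text.lower().split()

    def convert_tokens_to_ids(self, tokens):
        # Fake ids; real code would look tokens up in a vocabulary file.
        return list(range(1, len(tokens) + 1))


processor = PROCESSORS["mrpc"]()
examples = [InputExample(guid="dev-1",
                         text_a="A cat sat .",
                         text_b="A cat was sitting .",
                         label="1")]
features, label_map = convert_examples_to_features(
    examples, processor.get_labels(), max_seq_length=32, tokenizer=WhitespaceTokenizer())
print(features[0].input_ids[:8], label_map)  # padded token ids and {'0': 0, '1': 1}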
PyTorch/LanguageModeling/BERT/triton/large/runner
runner
start_NVIDIA-T4
#!/bin/bash
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Install Docker
. /etc/os-release && \
    curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \
    echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \
    curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | apt-key add - && \
    curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \
    apt-get update && \
    apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2

# Install packages
pip install -r triton/runner/requirements.txt

# Evaluate Runner
python3 -m "triton.large.runner.__main__" \
    --config-path "triton/large/runner/config_NVIDIA-T4.yaml" \
    --device 0
TensorFlow/Detection/SSD/models/research/slim
slim
export_inference_graph
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Saves out a GraphDef containing the architecture of the model.

To use it, run something like this, with a model name defined by slim:

bazel build tensorflow_models/research/slim:export_inference_graph
bazel-bin/tensorflow_models/research/slim/export_inference_graph \
--model_name=inception_v3 --output_file=/tmp/inception_v3_inf_graph.pb

If you then want to use the resulting model with your own or pretrained
checkpoints as part of a mobile model, you can run freeze_graph to get a graph
def with the variables inlined as constants using:

bazel build tensorflow/python/tools:freeze_graph
bazel-bin/tensorflow/python/tools/freeze_graph \
--input_graph=/tmp/inception_v3_inf_graph.pb \
--input_checkpoint=/tmp/checkpoints/inception_v3.ckpt \
--input_binary=true --output_graph=/tmp/frozen_inception_v3.pb \
--output_node_names=InceptionV3/Predictions/Reshape_1

The output node names will vary depending on the model, but you can inspect and
estimate them using the summarize_graph tool:

bazel build tensorflow/tools/graph_transforms:summarize_graph
bazel-bin/tensorflow/tools/graph_transforms/summarize_graph \
--in_graph=/tmp/inception_v3_inf_graph.pb

To run the resulting graph in C++, you can look at the label_image sample code:

bazel build tensorflow/examples/label_image:label_image
bazel-bin/tensorflow/examples/label_image/label_image \
--image=${HOME}/Pictures/flowers.jpg \
--input_layer=input \
--output_layer=InceptionV3/Predictions/Reshape_1 \
--graph=/tmp/frozen_inception_v3.pb \
--labels=/tmp/imagenet_slim_labels.txt \
--input_mean=0 \
--input_std=255
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

import tensorflow as tf

from tensorflow.python.platform import gfile
from datasets import dataset_factory
from nets import nets_factory

slim = tf.contrib.slim

tf.app.flags.DEFINE_string(
    'model_name', 'inception_v3', 'The name of the architecture to save.')

tf.app.flags.DEFINE_boolean(
    'is_training', False,
    'Whether to save out a training-focused version of the model.')

tf.app.flags.DEFINE_integer(
    'image_size', None,
    'The image size to use, otherwise use the model default_image_size.')

tf.app.flags.DEFINE_integer(
    'batch_size', None,
    'Batch size for the exported model. Defaulted to "None" so batch size can '
    'be specified at model runtime.')

tf.app.flags.DEFINE_string('dataset_name', 'imagenet',
                           'The name of the dataset to use with the model.')

tf.app.flags.DEFINE_integer(
    'labels_offset', 0,
    'An offset for the labels in the dataset. This flag is primarily used to '
    'evaluate the VGG and ResNet architectures which do not use a background '
    'class for the ImageNet dataset.')

tf.app.flags.DEFINE_string(
    'output_file', '', 'Where to save the resulting file to.')

tf.app.flags.DEFINE_string(
    'dataset_dir', '', 'Directory to save intermediate dataset files to')

tf.app.flags.DEFINE_bool(
    'quantize', False, 'whether to use quantized graph or not.')

tf.app.flags.DEFINE_bool(
    'is_video_model', False, 'whether to use 5-D inputs for video model.')

tf.app.flags.DEFINE_integer(
    'num_frames', None,
    'The number of frames to use. Only used if is_video_model is True.')

tf.app.flags.DEFINE_bool('write_text_graphdef', False,
                         'Whether to write a text version of graphdef.')

FLAGS = tf.app.flags.FLAGS


def main(_):
    if not FLAGS.output_file:
        raise ValueError('You must supply the path to save to with --output_file')
    if FLAGS.is_video_model and not FLAGS.num_frames:
        raise ValueError(
            'Number of frames must be specified for video models with --num_frames')
    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default() as graph:
        dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
                                              FLAGS.dataset_dir)
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=(dataset.num_classes - FLAGS.labels_offset),
            is_training=FLAGS.is_training)
        image_size = FLAGS.image_size or network_fn.default_image_size
        if FLAGS.is_video_model:
            input_shape = [FLAGS.batch_size, FLAGS.num_frames,
                           image_size, image_size, 3]
        else:
            input_shape = [FLAGS.batch_size, image_size, image_size, 3]
        placeholder = tf.placeholder(name='input', dtype=tf.float32,
                                     shape=input_shape)
        network_fn(placeholder)

        if FLAGS.quantize:
            tf.contrib.quantize.create_eval_graph()

        graph_def = graph.as_graph_def()
        if FLAGS.write_text_graphdef:
            tf.io.write_graph(
                graph_def,
                os.path.dirname(FLAGS.output_file),
                os.path.basename(FLAGS.output_file),
                as_text=True)
        else:
            with gfile.GFile(FLAGS.output_file, 'wb') as f:
                f.write(graph_def.SerializeToString())


if __name__ == '__main__':
    tf.app.run()
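A minimal sketch, not part of the original script, of how the exported GraphDef might be loaded back for inference. It assumes the same TF1-style API used above; the file path is illustrative.

import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('/tmp/inception_v3_inf_graph.pb', 'rb') as f:  # illustrative path
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    # 'input' is the placeholder name defined in main() above.
    input_tensor = graph.get_tensor_by_name('input:0')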
TensorFlow/Segmentation/VNet/utils
utils
model_fn
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import horovod.tensorflow as hvd
import tensorflow as tf

from model.vnet import Builder
from utils.var_storage import model_variable_scope


def dice_coef(predict, target, dice_type, axis=1, eps=1e-6):
    intersection = tf.reduce_sum(predict * target, axis=axis)
    if dice_type == 'sorensen':
        union = tf.reduce_sum(predict + target, axis=axis)
    else:
        raise ValueError("dice_type must be 'sorensen'")

    dice = (2 * intersection + eps) / (union + eps)
    return tf.reduce_mean(dice, axis=0)  # average over batch


def vnet_v2(features, labels, mode, params):
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    is_eval = (mode == tf.estimator.ModeKeys.EVAL)
    is_predict = (mode == tf.estimator.ModeKeys.PREDICT)

    num_classes = len(params.labels)
    channel_axis = -1

    with model_variable_scope(
            'vnet',
            reuse=tf.AUTO_REUSE,
            dtype=tf.float16,
            debug_mode=False
    ):
        features = tf.reshape(features, [params.batch_size] + params.input_shape + [1])
        if labels is not None:
            labels = tf.reshape(labels, [params.batch_size] + params.input_shape + [1])

        logits = Builder(kernel_size=params.convolution_size,
                         n_classes=num_classes,
                         downscale_blocks=params.downscale_blocks,
                         upscale_blocks=params.upscale_blocks,
                         upsampling=params.upsampling,
                         pooling=params.pooling,
                         normalization=params.normalization_layer,
                         activation=params.activation,
                         mode=mode)(features)

        softmax = tf.nn.softmax(logits=logits, axis=channel_axis)

        if is_predict:
            prediction = tf.argmax(input=softmax, axis=channel_axis)
            predictions = {'prediction': prediction}
            return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

        # Flattened logits and softmax - in FP32
        flattened_softmax = tf.reshape(softmax, [tf.shape(logits)[0], -1, num_classes])
        flattened_softmax = tf.cast(flattened_softmax, tf.float32)

        # One hot encoding
        flattened_labels = tf.layers.flatten(labels)
        one_hot_labels = tf.one_hot(indices=flattened_labels,
                                    depth=num_classes,
                                    dtype=tf.float32)

        with tf.name_scope("loss"):
            if params.loss == 'dice':
                loss = dice_coef(predict=tf.cast(flattened_softmax, tf.float32),
                                 target=one_hot_labels,
                                 dice_type='sorensen')
                total_loss = tf.identity(tf.reduce_sum(1. - loss), name='total_loss_ref')
            else:
                raise NotImplementedError

        train_op = None
        if is_training:
            global_step = tf.train.get_or_create_global_step()

            with tf.name_scope("optimizer"):
                if params.optimizer == 'rmsprop':
                    optimizer = tf.train.RMSPropOptimizer(learning_rate=params.base_lr,
                                                          momentum=params.momentum,
                                                          centered=True)
                else:
                    raise NotImplementedError

            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                gradients, variables = zip(*optimizer.compute_gradients(total_loss))
                if params.gradient_clipping == 'global_norm':
                    gradients, _ = tf.clip_by_global_norm(gradients, 1.0)
                    tf.logging.info('clipping: global_norm')
                else:
                    raise NotImplementedError

                optimizer = hvd.DistributedOptimizer(optimizer)

                try:
                    amp_envar_enabled = (int(os.environ['TF_ENABLE_AUTO_MIXED_PRECISION']) == 1)
                except KeyError:
                    amp_envar_enabled = False

                if params.use_amp and not amp_envar_enabled:
                    optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
                        optimizer,
                        loss_scale='dynamic'
                    )

                train_op = optimizer.minimize(total_loss, global_step=global_step)

        eval_metric_ops = None
        if is_eval:
            dice_loss = dice_coef(predict=tf.cast(flattened_softmax, tf.float32),
                                  target=one_hot_labels,
                                  dice_type='sorensen')
            eval_loss = tf.identity(dice_loss, name='eval_loss_ref')
            eval_metric_ops = {}
            for i in range(num_classes):
                eval_metric_ops['%s dice' % params.labels[str(i)]] = tf.metrics.mean(eval_loss[i])

        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=total_loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops)
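A small worked example, not part of the original module, of what dice_coef returns on a toy batch; the prediction and target values are made up, and it assumes the TF1 graph/session execution style used above.

import numpy as np
import tensorflow as tf

predict = tf.constant(np.array([[0.8, 0.2, 0.6, 0.9]], dtype=np.float32))
target = tf.constant(np.array([[1.0, 0.0, 1.0, 1.0]], dtype=np.float32))
dice = dice_coef(predict=predict, target=target, dice_type='sorensen')

with tf.Session() as sess:
    # intersection = 2.3, union = 5.5, so (2 * 2.3 + eps) / (5.5 + eps) ~= 0.836
    print(sess.run(dice))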
TensorFlow2/LanguageModeling/ELECTRA
ELECTRA
configuration
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ELECTRA model configuration """

import logging

from configuration_utils import PretrainedConfig

logger = logging.getLogger(__name__)

ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/electra-small-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-generator/config.json",
    "google/electra-base-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-generator/config.json",
    "google/electra-large-generator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-generator/config.json",
    "google/electra-small-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-small-discriminator/config.json",
    "google/electra-base-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-base-discriminator/config.json",
    "google/electra-large-discriminator": "https://s3.amazonaws.com/models.huggingface.co/bert/google/electra-large-discriminator/config.json",
}


class ElectraConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a :class:`~transformers.ElectraModel`.
    It is used to instantiate an ELECTRA model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
    the ELECTRA `google/electra-small-discriminator <https://huggingface.co/google/electra-small-discriminator>`__
    architecture.

    Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
    to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
    for more information.

    Args:
        vocab_size (:obj:`int`, optional, defaults to 30522):
            Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented
            by the `inputs_ids` passed to the forward method of :class:`~transformers.ElectraModel`.
        embedding_size (:obj:`int`, optional, defaults to 128):
            Dimensionality of the embeddings.
        hidden_size (:obj:`int`, optional, defaults to 256):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (:obj:`int`, optional, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (:obj:`int`, optional, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (:obj:`int`, optional, defaults to 1024):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
            The non-linear activation function (function or string) in the encoder and pooler.
            If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (:obj:`int`, optional, defaults to 512):
            The maximum sequence length that this model might ever be used with.
            Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (:obj:`int`, optional, defaults to 2):
            The vocabulary size of the `token_type_ids` passed into :class:`~transformers.ElectraModel`.
        initializer_range (:obj:`float`, optional, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
            The epsilon used by the layer normalization layers.

    Example::

        from transformers import ElectraModel, ElectraConfig

        # Initializing a ELECTRA electra-base-uncased style configuration
        configuration = ElectraConfig()

        # Initializing a model from the electra-base-uncased style configuration
        model = ElectraModel(configuration)

        # Accessing the model configuration
        configuration = model.config

    Attributes:
        pretrained_config_archive_map (Dict[str, str]):
            A dictionary containing all the available pre-trained checkpoints.
    """
    pretrained_config_archive_map = ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "electra"

    def __init__(
        self,
        vocab_size=30522,
        embedding_size=128,
        hidden_size=256,
        num_hidden_layers=12,
        num_attention_heads=4,
        intermediate_size=1024,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        **kwargs
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
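A brief sketch, not part of the original module, showing how defaults can be overridden at construction time; the values below are illustrative only and simply populate the attributes set in __init__ above.

config = ElectraConfig(
    embedding_size=768,        # illustrative overrides
    hidden_size=1024,
    num_hidden_layers=24,
    num_attention_heads=16,
    intermediate_size=4096,
)
print(config.hidden_size, config.num_attention_heads)  # 1024 16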