Dataset columns:
- relative_path: string (812 distinct values)
- section: string (339 distinct values)
- filename: string (2 to 61 characters)
- text: string (6 to 1.76M characters)
PyTorch/SpeechSynthesis/HiFiGAN/fastpitch
fastpitch
model
# ***************************************************************************** # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # ***************************************************************************** from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from common.layers import ConvReLUNorm from common.utils import mask_from_lens from fastpitch.alignment import b_mas, mas_width1 from fastpitch.attention import ConvAttention from fastpitch.transformer import FFTransformer def regulate_len(durations, enc_out, pace: float = 1.0, mel_max_len: Optional[int] = None): """If target=None, then predicted durations are applied""" dtype = enc_out.dtype reps = durations.float() / pace reps = (reps + 0.5).long() dec_lens = reps.sum(dim=1) max_len = dec_lens.max() reps_cumsum = torch.cumsum(F.pad(reps, (1, 0, 0, 0), value=0.0), dim=1)[:, None, :] reps_cumsum = reps_cumsum.to(dtype) range_ = torch.arange(max_len).to(enc_out.device)[None, :, None] mult = ((reps_cumsum[:, :, :-1] <= range_) & (reps_cumsum[:, :, 1:] > range_)) mult = mult.to(dtype) enc_rep = torch.matmul(mult, enc_out) if mel_max_len is not None: enc_rep = enc_rep[:, :mel_max_len] dec_lens = torch.clamp_max(dec_lens, mel_max_len) return enc_rep, dec_lens def average_pitch(pitch, durs): durs_cums_ends = torch.cumsum(durs, dim=1).long() durs_cums_starts = F.pad(durs_cums_ends[:, :-1], (1, 0)) pitch_nonzero_cums = F.pad(torch.cumsum(pitch != 0.0, dim=2), (1, 0)) pitch_cums = F.pad(torch.cumsum(pitch, dim=2), (1, 0)) bs, l = durs_cums_ends.size() n_formants = pitch.size(1) dcs = durs_cums_starts[:, None, :].expand(bs, n_formants, l) dce = durs_cums_ends[:, None, :].expand(bs, n_formants, l) pitch_sums = (torch.gather(pitch_cums, 2, dce) - torch.gather(pitch_cums, 2, dcs)).float() pitch_nelems = (torch.gather(pitch_nonzero_cums, 2, dce) - torch.gather(pitch_nonzero_cums, 2, dcs)).float() pitch_avg = torch.where(pitch_nelems == 0.0, pitch_nelems, pitch_sums / pitch_nelems) return pitch_avg class TemporalPredictor(nn.Module): """Predicts a single float per each 
temporal location""" def __init__(self, input_size, filter_size, kernel_size, dropout, n_layers=2, n_predictions=1): super(TemporalPredictor, self).__init__() self.layers = nn.Sequential(*[ ConvReLUNorm(input_size if i == 0 else filter_size, filter_size, kernel_size=kernel_size, dropout=dropout) for i in range(n_layers)] ) self.n_predictions = n_predictions self.fc = nn.Linear(filter_size, self.n_predictions, bias=True) def forward(self, enc_out, enc_out_mask): out = enc_out * enc_out_mask out = self.layers(out.transpose(1, 2)).transpose(1, 2) out = self.fc(out) * enc_out_mask return out class FastPitch(nn.Module): def __init__(self, n_mel_channels, n_symbols, padding_idx, symbols_embedding_dim, in_fft_n_layers, in_fft_n_heads, in_fft_d_head, in_fft_conv1d_kernel_size, in_fft_conv1d_filter_size, in_fft_output_size, p_in_fft_dropout, p_in_fft_dropatt, p_in_fft_dropemb, out_fft_n_layers, out_fft_n_heads, out_fft_d_head, out_fft_conv1d_kernel_size, out_fft_conv1d_filter_size, out_fft_output_size, p_out_fft_dropout, p_out_fft_dropatt, p_out_fft_dropemb, dur_predictor_kernel_size, dur_predictor_filter_size, p_dur_predictor_dropout, dur_predictor_n_layers, pitch_predictor_kernel_size, pitch_predictor_filter_size, p_pitch_predictor_dropout, pitch_predictor_n_layers, pitch_embedding_kernel_size, energy_conditioning, energy_predictor_kernel_size, energy_predictor_filter_size, p_energy_predictor_dropout, energy_predictor_n_layers, energy_embedding_kernel_size, n_speakers, speaker_emb_weight, pitch_conditioning_formants=1): super(FastPitch, self).__init__() self.encoder = FFTransformer( n_layer=in_fft_n_layers, n_head=in_fft_n_heads, d_model=symbols_embedding_dim, d_head=in_fft_d_head, d_inner=in_fft_conv1d_filter_size, kernel_size=in_fft_conv1d_kernel_size, dropout=p_in_fft_dropout, dropatt=p_in_fft_dropatt, dropemb=p_in_fft_dropemb, embed_input=True, d_embed=symbols_embedding_dim, n_embed=n_symbols, padding_idx=padding_idx) if n_speakers > 1: self.speaker_emb = nn.Embedding(n_speakers, symbols_embedding_dim) else: self.speaker_emb = None self.speaker_emb_weight = speaker_emb_weight self.duration_predictor = TemporalPredictor( in_fft_output_size, filter_size=dur_predictor_filter_size, kernel_size=dur_predictor_kernel_size, dropout=p_dur_predictor_dropout, n_layers=dur_predictor_n_layers ) self.decoder = FFTransformer( n_layer=out_fft_n_layers, n_head=out_fft_n_heads, d_model=symbols_embedding_dim, d_head=out_fft_d_head, d_inner=out_fft_conv1d_filter_size, kernel_size=out_fft_conv1d_kernel_size, dropout=p_out_fft_dropout, dropatt=p_out_fft_dropatt, dropemb=p_out_fft_dropemb, embed_input=False, d_embed=symbols_embedding_dim ) self.pitch_predictor = TemporalPredictor( in_fft_output_size, filter_size=pitch_predictor_filter_size, kernel_size=pitch_predictor_kernel_size, dropout=p_pitch_predictor_dropout, n_layers=pitch_predictor_n_layers, n_predictions=pitch_conditioning_formants ) self.pitch_emb = nn.Conv1d( pitch_conditioning_formants, symbols_embedding_dim, kernel_size=pitch_embedding_kernel_size, padding=int((pitch_embedding_kernel_size - 1) / 2)) # Store values precomputed for training data within the model self.register_buffer('pitch_mean', torch.zeros(1)) self.register_buffer('pitch_std', torch.zeros(1)) self.energy_conditioning = energy_conditioning if energy_conditioning: self.energy_predictor = TemporalPredictor( in_fft_output_size, filter_size=energy_predictor_filter_size, kernel_size=energy_predictor_kernel_size, dropout=p_energy_predictor_dropout, n_layers=energy_predictor_n_layers, 
n_predictions=1 ) self.energy_emb = nn.Conv1d( 1, symbols_embedding_dim, kernel_size=energy_embedding_kernel_size, padding=int((energy_embedding_kernel_size - 1) / 2)) self.proj = nn.Linear(out_fft_output_size, n_mel_channels, bias=True) self.attention = ConvAttention( n_mel_channels, 0, symbols_embedding_dim, use_query_proj=True, align_query_enc_type='3xconv') def binarize_attention(self, attn, in_lens, out_lens): """For training purposes only. Binarizes attention with MAS. These will no longer recieve a gradient. Args: attn: B x 1 x max_mel_len x max_text_len """ b_size = attn.shape[0] with torch.no_grad(): attn_cpu = attn.data.cpu().numpy() attn_out = torch.zeros_like(attn) for ind in range(b_size): hard_attn = mas_width1( attn_cpu[ind, 0, :out_lens[ind], :in_lens[ind]]) attn_out[ind, 0, :out_lens[ind], :in_lens[ind]] = torch.tensor( hard_attn, device=attn.get_device()) return attn_out def binarize_attention_parallel(self, attn, in_lens, out_lens): """For training purposes only. Binarizes attention with MAS. These will no longer recieve a gradient. Args: attn: B x 1 x max_mel_len x max_text_len """ with torch.no_grad(): attn_cpu = attn.data.cpu().numpy() attn_out = b_mas(attn_cpu, in_lens.cpu().numpy(), out_lens.cpu().numpy(), width=1) return torch.from_numpy(attn_out).to(attn.get_device()) def forward(self, inputs, use_gt_pitch=True, pace=1.0, max_duration=75): (inputs, input_lens, mel_tgt, mel_lens, pitch_dense, energy_dense, speaker, attn_prior, audiopaths) = inputs mel_max_len = mel_tgt.size(2) # Calculate speaker embedding if self.speaker_emb is None: spk_emb = 0 else: spk_emb = self.speaker_emb(speaker).unsqueeze(1) spk_emb.mul_(self.speaker_emb_weight) # Input FFT enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb) # Alignment text_emb = self.encoder.word_emb(inputs) # make sure to do the alignments before folding attn_mask = mask_from_lens(input_lens)[..., None] == 0 # attn_mask should be 1 for unused timesteps in the text_enc_w_spkvec tensor attn_soft, attn_logprob = self.attention( mel_tgt, text_emb.permute(0, 2, 1), mel_lens, attn_mask, key_lens=input_lens, keys_encoded=enc_out, attn_prior=attn_prior) attn_hard = self.binarize_attention_parallel( attn_soft, input_lens, mel_lens) # Viterbi --> durations attn_hard_dur = attn_hard.sum(2)[:, 0, :] dur_tgt = attn_hard_dur assert torch.all(torch.eq(dur_tgt.sum(dim=1), mel_lens)) # Predict durations log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1) dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, max_duration) # Predict pitch pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1) # Average pitch over characters pitch_tgt = average_pitch(pitch_dense, dur_tgt) if use_gt_pitch and pitch_tgt is not None: pitch_emb = self.pitch_emb(pitch_tgt) else: pitch_emb = self.pitch_emb(pitch_pred) enc_out = enc_out + pitch_emb.transpose(1, 2) # Predict energy if self.energy_conditioning: energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1) # Average energy over characters energy_tgt = average_pitch(energy_dense.unsqueeze(1), dur_tgt) energy_tgt = torch.log(1.0 + energy_tgt) energy_emb = self.energy_emb(energy_tgt) energy_tgt = energy_tgt.squeeze(1) enc_out = enc_out + energy_emb.transpose(1, 2) else: energy_pred = None energy_tgt = None len_regulated, dec_lens = regulate_len( dur_tgt, enc_out, pace, mel_max_len) # Output FFT dec_out, dec_mask = self.decoder(len_regulated, dec_lens) mel_out = self.proj(dec_out) return (mel_out, dec_mask, dur_pred, log_dur_pred, pitch_pred, pitch_tgt, 
energy_pred, energy_tgt, attn_soft, attn_hard, attn_hard_dur, attn_logprob) def infer(self, inputs, pace=1.0, dur_tgt=None, pitch_tgt=None, energy_tgt=None, pitch_transform=None, max_duration=75, speaker=0): if self.speaker_emb is None: spk_emb = 0 else: speaker = (torch.ones(inputs.size(0)).long().to(inputs.device) * speaker) spk_emb = self.speaker_emb(speaker).unsqueeze(1) spk_emb.mul_(self.speaker_emb_weight) # Input FFT enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb) # Predict durations log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1) dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, max_duration) # Pitch over chars pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1) if pitch_transform is not None: if self.pitch_std[0] == 0.0: # XXX LJSpeech-1.1 defaults mean, std = 218.14, 67.24 else: mean, std = self.pitch_mean[0], self.pitch_std[0] pitch_pred = pitch_transform(pitch_pred, enc_mask.sum(dim=(1,2)), mean, std) if pitch_tgt is None: pitch_emb = self.pitch_emb(pitch_pred).transpose(1, 2) else: pitch_emb = self.pitch_emb(pitch_tgt).transpose(1, 2) enc_out = enc_out + pitch_emb # Predict energy if self.energy_conditioning: if energy_tgt is None: energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1) energy_emb = self.energy_emb(energy_pred.unsqueeze(1)).transpose(1, 2) else: energy_emb = self.energy_emb(energy_tgt).transpose(1, 2) enc_out = enc_out + energy_emb else: energy_pred = None len_regulated, dec_lens = regulate_len( dur_pred if dur_tgt is None else dur_tgt, enc_out, pace, mel_max_len=None) dec_out, dec_mask = self.decoder(len_regulated, dec_lens) mel_out = self.proj(dec_out) # mel_lens = dec_mask.squeeze(2).sum(axis=1).long() mel_out = mel_out.permute(0, 2, 1) # For inference.py return mel_out, dec_lens, dur_pred, pitch_pred, energy_pred
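The regulate_len function defined in the file above turns per-token durations into a frame-level encoder sequence by repeating each encoder frame duration[i] times, with pace rescaling the durations. A minimal sketch of its behavior on toy tensors, assuming the file is importable as fastpitch.model per the repository layout:

import torch
from fastpitch.model import regulate_len  # import path assumed from the repo layout

torch.manual_seed(0)
enc_out = torch.randn(2, 5, 8)               # [batch, text_len, channels]
durations = torch.tensor([[1, 2, 0, 3, 1],   # frames per token; 0 drops the token
                          [2, 2, 2, 2, 2]])

enc_rep, dec_lens = regulate_len(durations, enc_out, pace=1.0)
print(enc_rep.shape)    # torch.Size([2, 10, 8]) -- padded to the longest expansion
print(dec_lens)         # tensor([ 7, 10])

# A smaller pace stretches every duration, which is how infer() slows speech down.
_, dec_lens_slow = regulate_len(durations, enc_out, pace=0.5)
print(dec_lens_slow)    # tensor([14, 20])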
PyTorch/Translation/Transformer/fairseq/data
data
__init__
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .dictionary import Dictionary
from .indexed_dataset import IndexedDataset, IndexedInMemoryDataset, IndexedRawTextDataset  # noqa: F401
from .language_pair_dataset import LanguagePairDataset, load_dataset_splits
from .data_utils import EpochBatchIterator
PyTorch/Forecasting/TFT/triton/runner
runner
summary
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import csv import json import pathlib from typing import Dict, List, Union # method from PEP-366 to support relative import in executed modules import yaml if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ..deployment_toolkit.report import save_results, sort_results from .logger import LOGGER def save_summary(result_type: str, results: List, summary_dir: pathlib.Path) -> None: """ Create file with summary for results of given type Args: result_type: Type of results to dump results: Results data summary_dir: Path where results should be stored Returns: None """ if len(results) == 0: LOGGER.warning(f"No {result_type} results found.") return results = sort_results(results=results) kind_file = summary_dir / f"{result_type}_summary.csv" save_results(filename=kind_file.as_posix(), data=results, formatted=True) LOGGER.info(f"Summary for {result_type} stored in {kind_file}") def load_results(*, results_path: Union[pathlib.Path, str], result_type: str, parameters: Dict) -> List: """ Update results Args: results_path: Path to file or directory from which data should be read result_type: type of results parameters: Parameters used in experiment which generated results Returns: List of result rows """ LOGGER.debug(f"Loading {result_type} from {results_path} for summary") results_path = pathlib.Path(results_path) if results_path.is_file(): files = [results_path] elif results_path.is_dir(): files = list(results_path.iterdir()) else: LOGGER.debug(f"Unable to load file: {results_path}. Generating empty rows.") data = [{}] return data if any([file.name.endswith(".ckpt") for file in files]): model_analyzer_metrics = results_path / "metrics-model-inference.csv" files = [model_analyzer_metrics] else: files = [file for file in files if file.name.endswith(".csv")] results = list() parameters_cpy = {key: value for key, value in parameters.items() if key != "batch"} for file in files: if file.suffix == ".csv": data = _generate_data_from_csv(file=file) elif file.suffix == ".json": data = _generate_data_from_json(file=file) elif file.suffix == ".yaml": data = _generate_data_from_yaml(file=file) else: raise ValueError(f"Unsupported file extension: {file.suffix}") for item in data: result = {**parameters_cpy, **item} results.append(result) LOGGER.debug(f"Loading done. 
Collected {len(results)} results.") return results def _normalize_key(*, key: str) -> str: """ Normalize key Args: key: Key to normalize Returns: Normalized string """ key = "_".join(key.split(sep=" ")) key = key.lower() return key def _normalize_keys(*, data: Dict) -> Dict: """ Normalize keys in dictionary Args: data: Dictionary to normalize Returns: Normalized dictionary """ keys = {_normalize_key(key=key): value for key, value in data.items()} return keys def _generate_data_from_csv(*, file: Union[pathlib.Path, str]) -> List[Dict]: """ Generate result rows from CSV file Args: file: CSV file path Returns: List of rows """ LOGGER.debug(f"Reading data from {file}") filtered_rows: List[Dict] = [] with open(file, "r") as csvfile: reader = csv.DictReader(csvfile) for r in reader: r = _normalize_keys(data=r) filtered_row = {k: v for k, v in r.items()} filtered_rows.append(filtered_row) LOGGER.debug("done") return filtered_rows def _generate_data_from_json(file: pathlib.Path) -> List[Dict]: LOGGER.info(f"Reading data from {file}") filtered_rows: List[Dict] = list() with open(file, "r") as json_file: file_data = json.load(json_file) if not isinstance(file_data, list): file_data = [file_data] for r in file_data: r = _normalize_keys(data=r) filtered_row = {k: v for k, v in r.items()} filtered_rows.append(filtered_row) LOGGER.info("done") return filtered_rows def _generate_data_from_yaml(file: pathlib.Path) -> List[Dict]: LOGGER.info(f"Reading data from {file}") filtered_rows: List[Dict] = list() with open(file, "r") as yaml_file: file_data = yaml.safe_load(yaml_file) if not isinstance(file_data, list): file_data = [file_data] for r in file_data: r = _normalize_keys(data=r) filtered_row = {k: v for k, v in r.items()} filtered_rows.append(filtered_row) LOGGER.info("done") return filtered_rows
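Every row loaded by the summary code above has its column names lower-cased and underscore-joined before being merged with the experiment parameters. A standalone sketch of that normalization, re-implemented here rather than imported (the module depends on the runner's package layout); the sample metric names and values are made up for illustration:

# Mirrors _normalize_key / _normalize_keys from the module above.
def normalize_key(key: str) -> str:
    return "_".join(key.split(sep=" ")).lower()

row = {"Batch Size": 8, "Inferences/Second": 1234.5, "p99 latency [ms]": 7.2}
print({normalize_key(k): v for k, v in row.items()})
# {'batch_size': 8, 'inferences/second': 1234.5, 'p99_latency_[ms]': 7.2}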
TensorFlow2/LanguageModeling/BERT/data
data
Downloader
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from GooglePretrainedWeightDownloader import GooglePretrainedWeightDownloader
from NVIDIAPretrainedWeightDownloader import NVIDIAPretrainedWeightDownloader
from WikiDownloader import WikiDownloader
from BooksDownloader import BooksDownloader
from GLUEDownloader import GLUEDownloader
from SquadDownloader import SquadDownloader
from PubMedDownloader import PubMedDownloader


class Downloader:
    def __init__(self, dataset_name, save_path):
        self.dataset_name = dataset_name
        self.save_path = save_path

    def download(self):
        if self.dataset_name == 'bookscorpus':
            self.download_bookscorpus()
        elif self.dataset_name == 'wikicorpus_en':
            self.download_wikicorpus('en')
        elif self.dataset_name == 'wikicorpus_zh':
            self.download_wikicorpus('zh')
        elif self.dataset_name == 'pubmed_baseline':
            self.download_pubmed('baseline')
        elif self.dataset_name == 'pubmed_daily_update':
            self.download_pubmed('daily_update')
        elif self.dataset_name == 'pubmed_fulltext':
            self.download_pubmed('fulltext')
        elif self.dataset_name == 'pubmed_open_access':
            self.download_pubmed('open_access')
        elif self.dataset_name == 'google_pretrained_weights':
            self.download_google_pretrained_weights()
        elif self.dataset_name == 'nvidia_pretrained_weights':
            self.download_nvidia_pretrained_weights()
        elif self.dataset_name == 'mrpc':
            self.download_glue(self.dataset_name)
        elif self.dataset_name == 'mnli':
            self.download_glue(self.dataset_name)
        elif self.dataset_name == 'cola':
            self.download_glue(self.dataset_name)
        elif self.dataset_name == 'sst-2':
            self.download_glue(self.dataset_name)
        elif self.dataset_name == 'squad':
            self.download_squad()
        elif self.dataset_name == 'all':
            self.download_bookscorpus()
            self.download_wikicorpus('en')
            self.download_wikicorpus('zh')
            self.download_pubmed('baseline')
            self.download_pubmed('daily_update')
            self.download_pubmed('fulltext')
            self.download_pubmed('open_access')
            self.download_google_pretrained_weights()
            self.download_nvidia_pretrained_weights()
            self.download_glue("cola")
            self.download_glue("mnli")
            self.download_glue("mrpc")
            self.download_glue("sst-2")
            self.download_squad()
        else:
            print(self.dataset_name)
            assert False, 'Unknown dataset_name provided to downloader'

    def download_bookscorpus(self):
        downloader = BooksDownloader(self.save_path)
        downloader.download()

    def download_wikicorpus(self, language):
        downloader = WikiDownloader(language, self.save_path)
        downloader.download()

    def download_pubmed(self, subset):
        downloader = PubMedDownloader(subset, self.save_path)
        downloader.download()

    def download_google_pretrained_weights(self):
        downloader = GooglePretrainedWeightDownloader(self.save_path)
        downloader.download()

    def download_nvidia_pretrained_weights(self):
        downloader = NVIDIAPretrainedWeightDownloader(self.save_path)
        downloader.download()

    def download_glue(self, glue_task_name):
        downloader = GLUEDownloader(self.save_path)
        downloader.download(glue_task_name)

    def download_squad(self):
        downloader = SquadDownloader(self.save_path)
        downloader.download()
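A hedged driver sketch for the Downloader class above. It assumes the script runs from the directory holding these downloader modules (they import each other by bare module name), and the save path is only an example:

from Downloader import Downloader

save_path = './download'   # example location, not mandated by the source
for name in ('squad', 'mrpc', 'google_pretrained_weights'):
    Downloader(name, save_path).download()

# Downloader('all', save_path).download() fetches every supported corpus,
# including Wikipedia, BooksCorpus, the PubMed subsets and the GLUE tasks.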
PaddlePaddle/Classification/RN50v1.5/utils
utils
utility
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os


def get_num_trainers():
    num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
    return num_trainers


def get_trainer_id():
    trainer_id = int(os.environ.get('PADDLE_TRAINER_ID', 0))
    return trainer_id
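A small usage sketch for these helpers, assuming the module is importable as utils.utility per the directory layout. PADDLE_TRAINERS_NUM and PADDLE_TRAINER_ID are the environment variables read above (set by Paddle's distributed launcher); the file list is a made-up example:

from utils.utility import get_num_trainers, get_trainer_id

# Shard a list of input files across trainers; with the defaults
# (1 trainer, id 0) every file stays on the single process.
files = [f'part-{i:05d}' for i in range(16)]
local_files = files[get_trainer_id()::get_num_trainers()]
print(len(local_files))   # 16 when the environment variables are unset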
PyTorch/LanguageModeling/BERT/lamb_amp_opt/fused_lamb
fused_lamb
fused_lamb
import torch from copy import deepcopy from itertools import chain from collections import defaultdict, abc as container_abcs from apex.multi_tensor_apply import multi_tensor_applier import fused_lamb_CUDA class FusedLAMBAMP(torch.optim.Optimizer): def __init__(self, params, lr=1e-3, step=0, bias_correction=True, betas=(0.9, 0.999), eps=1e-6, weight_decay=0.01, amsgrad=False, adam_w_mode=True, grad_averaging=True, set_grad_none=True, max_grad_norm=1.0, use_nvlamb=False): if amsgrad: raise RuntimeError('FusedLAMB does not support the AMSGrad variant.') # The learning rate (lr) and optimizer step (step) should be located on device # in order to faciliated device sync free execution defaults = dict(lr=torch.tensor(lr, dtype=torch.float32, device=torch.cuda.current_device()), step=torch.tensor([step], dtype=torch.int, device=torch.cuda.current_device()), bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, grad_averaging=grad_averaging, max_grad_norm=max_grad_norm) super(FusedLAMBAMP, self).__init__(params, defaults) if multi_tensor_applier.available: import amp_C self.multi_tensor_l2norm=amp_C.multi_tensor_l2norm # Skip buffer self._dummy_overflow_buf = torch.tensor([0], dtype=torch.int, device=self.param_groups[0]["params"][0].device) self.multi_tensor_lamb = amp_C.multi_tensor_lamb else: raise RuntimeError('apex.optimizers.FusedLAMB requires cuda extensions') self._step_supports_amp_scaling = True self.param_groups_fp32 = [] self.adam_w_mode = 1 if adam_w_mode else 0 self.set_grad_none = set_grad_none self.use_nvlamb = use_nvlamb def load_state_dict(self, state_dict): r"""Loads the optimizer state. Args: state_dict (dict): optimizer state. Should be an object returned from a call to :meth:`state_dict`. """ # deepcopy, to be consistent with module API state_dict = deepcopy(state_dict) # Validate the state_dict groups = self.param_groups saved_groups = state_dict['param_groups'] if len(groups) != len(saved_groups): raise ValueError("loaded state dict has a different number of " "parameter groups") param_lens = (len(g['params']) for g in groups) saved_lens = (len(g['params']) for g in saved_groups) if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): raise ValueError("loaded state dict contains a parameter group " "that doesn't match the size of optimizer's group") # Update the state id_map = {old_id: p for old_id, p in zip(chain.from_iterable((g['params'] for g in saved_groups)), chain.from_iterable((g['params'] for g in groups)))} def cast(param, value): r"""Make a deep copy of value, casting all tensors to device of param.""" if isinstance(value, torch.Tensor): # Floating-point types are a bit special here. They are the only ones # that are assumed to always match the type of params. if param.is_floating_point(): value = value.to(value.dtype) value = value.to(value.device) return value elif isinstance(value, dict): return {k: cast(param, v) for k, v in value.items()} elif isinstance(value, container_abcs.Iterable): return type(value)(cast(param, v) for v in value) else: return value # Copy state assigned to params (and cast tensors to appropriate types). # State that is not assigned to params is copied as is (needed for # backward compatibility). 
state = defaultdict(dict) for k, v in state_dict['state'].items(): if k in id_map: param = id_map[k] state[param] = cast(param, v) else: state[k] = v # Update parameter groups, setting their 'params' value def update_group(group, new_group): new_group['params'] = group['params'] return new_group param_groups = [ update_group(g, ng) for g, ng in zip(groups, saved_groups)] self.__setstate__({'state': state, 'param_groups': param_groups}) def setup_fp32_params(self): for i, pg in enumerate(self.param_groups): param_list = pg['params'] self.param_groups_fp32.append({ 'params': [ p.clone().detach().float() if p.dtype == torch.half else None for p in param_list ], }) def zero_grad(self, set_to_none=False): for group in self.param_groups: for p in group['params']: if set_to_none: p.grad = None else: if p.grad.grad_fn is not None: p.grad.detach_() else: p.grad.requires_grad_(False) p.grad.zero_() @torch.no_grad() def step(self, closure=None, grad_scaler=None): loss = None if closure is not None: loss = closure() # create separate grad lists for fp32 and fp16 params g_all_32, g_all_16 = [], [] for gid, (group, fp32_group) in enumerate(zip(self.param_groups, self.param_groups_fp32)): for pid, (p, fp32_p) in enumerate(zip(group['params'], fp32_group['params'])): if p.grad is None: continue assert p.dtype in (torch.float16, torch.float32) if p.dtype == torch.float32: g_all_32.append(p.grad) else: # p.dtype == torch.float16: g_all_16.append(p.grad) device = self.param_groups[0]["params"][0].device found_inf = ( grad_scaler._check_inf_per_device(self)[device] if grad_scaler is not None else torch.zeros((1,), device=device) ) self._dummy_overflow_buf.copy_(found_inf) scale, inv_scale = None, None if grad_scaler: scale = grad_scaler._get_scale_async() inv_scale = scale.double().reciprocal().float() else: scale = torch.ones((1,), device=device) inv_scale = torch.ones((1,), device=device) # g_norm_32, g_norm_16 = torch.zeros(1, device=device), torch.zeros(1, device=device) g_norm_32, g_norm_16 = None, None # compute grad norm for two lists # NOTE(mkozuki): g_all_16, g_all_32, and global_grad_norm are norms of scaled gradients. # So, multiply `max_grad_norm` by scale. 
max_grad_norm = self.defaults['max_grad_norm'] * scale if len(g_all_32) > 0: g_norm_32 = multi_tensor_applier( fused_lamb_CUDA.multi_tensor_l2norm, self._dummy_overflow_buf, [g_all_32], False, )[0] else: g_norm_32 = torch.zeros((1,), device=device) if len(g_all_16) > 0: g_norm_16 = multi_tensor_applier( fused_lamb_CUDA.multi_tensor_l2norm, self._dummy_overflow_buf, [g_all_16], False, )[0] else: g_norm_16 = torch.zeros((1,), device=device) # blend two grad norms to get global grad norm global_grad_norm = multi_tensor_applier( fused_lamb_CUDA.multi_tensor_l2norm, self._dummy_overflow_buf, [[g_norm_32, g_norm_16]], False, )[0] # Run LAMB optimization math for gid, (group, fp32_group) in enumerate(zip(self.param_groups, self.param_groups_fp32)): bias_correction = 1 if group['bias_correction'] else 0 beta1, beta2 = group['betas'] grad_averaging = 1 if group['grad_averaging'] else 0 # assume same step across group now to simplify things # per parameter step can be easily support by making it tensor, or pass list into kernel if 'step' in group: group['step'] += (self._dummy_overflow_buf != 1).int() else: group['step'] = (self._dummy_overflow_buf != 1).int() # create lists for multi-tensor apply g_16, p_16, m_16, v_16, dst_param_fp16 = [], [], [], [], [] g_32, p_32, m_32, v_32 = [], [], [], [] for p, p_fp32 in zip(group['params'], fp32_group['params']): if p.grad is None: continue assert not p.grad.is_sparse state = self.state[p] # State initialization if len(state) == 0: dtype = torch.float if p.dtype == torch.half else p.dtype # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data, dtype=dtype) # Exponential moving average of gradient values state['exp_avg_sq'] = torch.zeros_like(p.data, dtype=dtype) if p.dtype == torch.float16: g_16.append(p.grad.data) p_16.append(p_fp32.data) m_16.append(state['exp_avg']) v_16.append(state['exp_avg_sq']) dst_param_fp16.append(p.data) elif p.dtype == torch.float32: assert p_fp32 is None g_32.append(p.grad.data) p_32.append(p.data) m_32.append(state['exp_avg']) v_32.append(state['exp_avg_sq']) else: raise RuntimeError('FusedLAMB only support fp16 and fp32.') if g_16: multi_tensor_applier( fused_lamb_CUDA.multi_tensor_lamb, self._dummy_overflow_buf, [g_16, p_16, m_16, v_16, dst_param_fp16], group['lr'], beta1, beta2, group['eps'], group['step'], bias_correction, group['weight_decay'], grad_averaging, self.adam_w_mode, global_grad_norm, max_grad_norm, self.use_nvlamb, found_inf, inv_scale) if g_32: multi_tensor_applier( fused_lamb_CUDA.multi_tensor_lamb, self._dummy_overflow_buf, [g_32, p_32, m_32, v_32], group['lr'], beta1, beta2, group['eps'], group['step'], bias_correction, group['weight_decay'], grad_averaging, self.adam_w_mode, global_grad_norm, max_grad_norm, self.use_nvlamb, found_inf, inv_scale) return loss def add_param_group(self, param_group): r"""Add a param group to the :class:`Optimizer` s `param_groups`. This can be useful when fine tuning a pre-trained network as frozen layers can be made trainable and added to the :class:`Optimizer` as training progresses. Args: param_group (dict): Specifies what Tensors should be optimized along with group specific optimization options. 
""" assert isinstance(param_group, dict), "param group must be a dict" params = param_group['params'] if isinstance(params, torch.Tensor): param_group['params'] = [params] elif isinstance(params, set): raise TypeError('optimizer parameters need to be organized in ordered collections, but ' 'the ordering of tensors in sets will change between runs. Please use a list instead.') else: param_group['params'] = list(params) for param in param_group['params']: if not isinstance(param, torch.Tensor): raise TypeError("optimizer can only optimize Tensors, " "but one of the params is " + torch.typename(param)) if not param.is_leaf: raise ValueError("can't optimize a non-leaf Tensor") for name, default in self.defaults.items(): if isinstance(default, torch.Tensor) : param_group.setdefault(name, deepcopy(default)) else : param_group.setdefault(name, default) params = param_group['params'] if len(params) != len(set(params)): warnings.warn("optimizer contains a parameter group with duplicate parameters; " "in future, this will cause an error; " "see github.com/pytorch/pytorch/issues/40967 for more information", stacklevel=3) param_set = set() for group in self.param_groups: param_set.update(set(group['params'])) if not param_set.isdisjoint(set(param_group['params'])): raise ValueError("some parameters appear in more than one parameter group") self.param_groups.append(param_group)
TensorFlow2/Classification/ConvNets
ConvNets
.gitignore
config/efficientnet_v2/s_cfg_fumb.py
config/efficientnet_v2/s_cfg_mb.py
config/efficientnet_v1/b4_allfumb_cfg.py
config/efficientnet_v1/b4_mixed_cfg.py
debug.py
output/
src/
.vscode/
eval/
log.json

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*,cover
.hypothesis/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# IPython Notebook
.ipynb_checkpoints

# pyenv
.python-version

# mypy
.mypy_cache

# celery beat schedule file
celerybeat-schedule

# dotenv
.env

# virtualenv
venv/
ENV/

# Spyder project settings
.spyderproject

# Rope project settings
.ropeproject

# PyCharm
.idea/

# For mac
.DS_Store
PyTorch/LanguageModeling/BERT/triton/large/runner
runner
config_NVIDIA-DGX-A100-(1x-A100-80GB)
checkpoints: - name: large-qa url: https://api.ngc.nvidia.com/v2/models/nvidia/bert_pyt_ckpt_large_qa_squad11_amp/versions/19.09.0/zip configurations: - accelerator: none accelerator_precision: fp16 batch_size: - 1 batch_sizes: '1' capture_cuda_graph: 0 checkpoint_variant: large-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 1 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: '1' - accelerator: none accelerator_precision: fp16 batch_size: - 16 batch_sizes: '16' capture_cuda_graph: 0 checkpoint_variant: large-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 - accelerator: none accelerator_precision: fp16 batch_size: - 8 batch_sizes: '8' capture_cuda_graph: 0 checkpoint_variant: large-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 8 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 4 8 - accelerator: trt accelerator_precision: fp16 batch_size: - 1 batch_sizes: '1' capture_cuda_graph: 0 checkpoint_variant: large-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 1 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: '1' - accelerator: trt accelerator_precision: fp16 batch_size: - 16 batch_sizes: '16' capture_cuda_graph: 0 checkpoint_variant: large-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 - accelerator: trt accelerator_precision: fp16 batch_size: - 8 batch_sizes: '8' capture_cuda_graph: 0 checkpoint_variant: large-qa export_format: onnx export_precision: fp16 format: onnx max_batch_size: 8 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 4 8 - accelerator: none accelerator_precision: fp16 batch_size: - 1 batch_sizes: '1' capture_cuda_graph: 0 checkpoint_variant: large-qa export_format: onnx export_precision: fp16 format: trt max_batch_size: 1 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: '1' - accelerator: none accelerator_precision: fp16 batch_size: - 16 batch_sizes: '16' capture_cuda_graph: 0 checkpoint_variant: large-qa export_format: onnx export_precision: fp16 format: trt max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 - accelerator: none accelerator_precision: fp16 batch_size: - 8 batch_sizes: '8' capture_cuda_graph: 0 checkpoint_variant: large-qa export_format: onnx export_precision: fp16 format: trt max_batch_size: 8 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 4 8 - accelerator: none accelerator_precision: fp16 batch_size: - 1 - 8 - 16 batch_sizes: 1 8 16 capture_cuda_graph: 0 checkpoint_variant: large-qa export_format: ts-trace export_precision: fp16 format: ts-trace max_batch_size: 16 max_seq_length: 384 precision: fp16 triton_gpu_engine_count: 1 triton_max_queue_delay: 1 triton_preferred_batch_sizes: 8 16 container_version: '21.10' datasets: - name: data datasets_dir: datasets framework: 
PyTorch model_name: BERT triton_container_image: null triton_custom_operations: null triton_dockerfile: null triton_load_model_method: explicit
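For orientation, a short sketch of reading this configuration outside the runner and listing the deployment variants it describes; the file name (and its .yaml extension) is assumed:

import yaml

with open('config_NVIDIA-DGX-A100-(1x-A100-80GB).yaml') as f:
    cfg = yaml.safe_load(f)

print(cfg['checkpoints'][0]['name'])   # large-qa
for variant in cfg['configurations']:
    print(variant['format'], variant['accelerator'],
          variant['max_batch_size'], variant['precision'])
# onnx, trt and ts-trace variants at max batch sizes 1, 8 and 16, all fp16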
TensorFlow2/Classification/ConvNets/efficientnet_v1/B4/evaluation
evaluation
evaluation_TF32_A100-80G
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

python3 main.py --cfg config/efficientnet_v1/b4_cfg.py \
    --mode eval \
    --use_xla \
    --model_dir ./output \
    --data_dir /data \
    --eval_batch_size 64 \
    --eval_img_size 380 \
    --memory_limit 81000
TensorFlow/Detection/SSD/models/research/object_detection/core
core
preprocessor
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Preprocess images and bounding boxes for detection. We perform two sets of operations in preprocessing stage: (a) operations that are applied to both training and testing data, (b) operations that are applied only to training data for the purpose of data augmentation. A preprocessing function receives a set of inputs, e.g. an image and bounding boxes, performs an operation on them, and returns them. Some examples are: randomly cropping the image, randomly mirroring the image, randomly changing the brightness, contrast, hue and randomly jittering the bounding boxes. The preprocess function receives a tensor_dict which is a dictionary that maps different field names to their tensors. For example, tensor_dict[fields.InputDataFields.image] holds the image tensor. The image is a rank 4 tensor: [1, height, width, channels] with dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where in each row there is a box with [ymin xmin ymax xmax]. Boxes are in normalized coordinates meaning their coordinate values range in [0, 1] To preprocess multiple images with the same operations in cases where nondeterministic operations are used, a preprocessor_cache.PreprocessorCache object can be passed into the preprocess function or individual operations. All nondeterministic operations except random_jitter_boxes support caching. E.g. Let tensor_dict{1,2,3,4,5} be copies of the same inputs. Let preprocess_options contain nondeterministic operation(s) excluding random_jitter_boxes. cache1 = preprocessor_cache.PreprocessorCache() cache2 = preprocessor_cache.PreprocessorCache() a = preprocess(tensor_dict1, preprocess_options, preprocess_vars_cache=cache1) b = preprocess(tensor_dict2, preprocess_options, preprocess_vars_cache=cache1) c = preprocess(tensor_dict3, preprocess_options, preprocess_vars_cache=cache2) d = preprocess(tensor_dict4, preprocess_options, preprocess_vars_cache=cache2) e = preprocess(tensor_dict5, preprocess_options) Then correspondings tensors of object pairs (a,b) and (c,d) are guaranteed to be equal element-wise, but the equality of any other object pair cannot be determined. Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing functions receive a rank 3 tensor for processing the image. Thus, inside the preprocess function we squeeze the image to become a rank 3 tensor and then we pass it to the functions. At the end of the preprocess we expand the image back to rank 4. 
""" import functools import inspect import sys import tensorflow as tf from tensorflow.python.ops import control_flow_ops from object_detection.core import box_list from object_detection.core import box_list_ops from object_detection.core import keypoint_ops from object_detection.core import preprocessor_cache from object_detection.core import standard_fields as fields from object_detection.utils import shape_utils def _apply_with_random_selector(x, func, num_cases, preprocess_vars_cache=None, key=''): """Computes func(x, sel), with sel sampled from [0...num_cases-1]. If both preprocess_vars_cache AND key are the same between two calls, sel will be the same value in both calls. Args: x: input Tensor. func: Python function to apply. num_cases: Python int32, number of cases to sample sel from. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. key: variable identifier for preprocess_vars_cache. Returns: The result of func(x, sel), where func receives the value of the selector as a python integer, but sel is sampled dynamically. """ generator_func = functools.partial( tf.random_uniform, [], maxval=num_cases, dtype=tf.int32) rand_sel = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.SELECTOR, preprocess_vars_cache, key) # Pass the real x only to one of the func calls. return control_flow_ops.merge([func( control_flow_ops.switch(x, tf.equal(rand_sel, case))[1], case) for case in range(num_cases)])[0] def _apply_with_random_selector_tuples(x, func, num_cases, preprocess_vars_cache=None, key=''): """Computes func(x, sel), with sel sampled from [0...num_cases-1]. If both preprocess_vars_cache AND key are the same between two calls, sel will be the same value in both calls. Args: x: A tuple of input tensors. func: Python function to apply. num_cases: Python int32, number of cases to sample sel from. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. key: variable identifier for preprocess_vars_cache. Returns: The result of func(x, sel), where func receives the value of the selector as a python integer, but sel is sampled dynamically. """ num_inputs = len(x) generator_func = functools.partial( tf.random_uniform, [], maxval=num_cases, dtype=tf.int32) rand_sel = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.SELECTOR_TUPLES, preprocess_vars_cache, key) # Pass the real x only to one of the func calls. tuples = [list() for t in x] for case in range(num_cases): new_x = [control_flow_ops.switch(t, tf.equal(rand_sel, case))[1] for t in x] output = func(tuple(new_x), case) for j in range(num_inputs): tuples[j].append(output[j]) for i in range(num_inputs): tuples[i] = control_flow_ops.merge(tuples[i])[0] return tuple(tuples) def _get_or_create_preprocess_rand_vars(generator_func, function_id, preprocess_vars_cache, key=''): """Returns a tensor stored in preprocess_vars_cache or using generator_func. If the tensor was previously generated and appears in the PreprocessorCache, the previously generated tensor will be returned. Otherwise, a new tensor is generated using generator_func and stored in the cache. Args: generator_func: A 0-argument function that generates a tensor. 
function_id: identifier for the preprocessing function used. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. key: identifier for the variable stored. Returns: The generated tensor. """ if preprocess_vars_cache is not None: var = preprocess_vars_cache.get(function_id, key) if var is None: var = generator_func() preprocess_vars_cache.update(function_id, key, var) else: var = generator_func() return var def _random_integer(minval, maxval, seed): """Returns a random 0-D tensor between minval and maxval. Args: minval: minimum value of the random tensor. maxval: maximum value of the random tensor. seed: random seed. Returns: A random 0-D tensor between minval and maxval. """ return tf.random_uniform( [], minval=minval, maxval=maxval, dtype=tf.int32, seed=seed) # TODO(mttang): This method is needed because the current # tf.image.rgb_to_grayscale method does not support quantization. Replace with # tf.image.rgb_to_grayscale after quantization support is added. def _rgb_to_grayscale(images, name=None): """Converts one or more images from RGB to Grayscale. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 1, containing the Grayscale value of the pixels. Args: images: The RGB tensor to convert. Last dimension must have size 3 and should contain RGB values. name: A name for the operation (optional). Returns: The converted grayscale image(s). """ with tf.name_scope(name, 'rgb_to_grayscale', [images]) as name: images = tf.convert_to_tensor(images, name='images') # Remember original dtype to so we can convert back if needed orig_dtype = images.dtype flt_image = tf.image.convert_image_dtype(images, tf.float32) # Reference for converting between RGB and grayscale. # https://en.wikipedia.org/wiki/Luma_%28video%29 rgb_weights = [0.2989, 0.5870, 0.1140] rank_1 = tf.expand_dims(tf.rank(images) - 1, 0) gray_float = tf.reduce_sum( flt_image * rgb_weights, rank_1, keep_dims=True) gray_float.set_shape(images.get_shape()[:-1].concatenate([1])) return tf.image.convert_image_dtype(gray_float, orig_dtype, name=name) def normalize_image(image, original_minval, original_maxval, target_minval, target_maxval): """Normalizes pixel values in the image. Moves the pixel values from the current [original_minval, original_maxval] range to a the [target_minval, target_maxval] range. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels]. original_minval: current image minimum value. original_maxval: current image maximum value. target_minval: target image minimum value. target_maxval: target image maximum value. Returns: image: image which is the same shape as input image. """ with tf.name_scope('NormalizeImage', values=[image]): original_minval = float(original_minval) original_maxval = float(original_maxval) target_minval = float(target_minval) target_maxval = float(target_maxval) image = tf.to_float(image) image = tf.subtract(image, original_minval) image = tf.multiply(image, (target_maxval - target_minval) / (original_maxval - original_minval)) image = tf.add(image, target_minval) return image def retain_boxes_above_threshold(boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, threshold=0.0): """Retains boxes whose label weight is above a given threshold. 
If the label weight for a box is missing (represented by NaN), the box is retained. The boxes that don't pass the threshold will not appear in the returned tensor. Args: boxes: float32 tensor of shape [num_instance, 4] representing boxes location in normalized coordinates. labels: rank 1 int32 tensor of shape [num_instance] containing the object classes. label_weights: float32 tensor of shape [num_instance] representing the weight for each box. label_confidences: float32 tensor of shape [num_instance] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. threshold: scalar python float. Returns: retained_boxes: [num_retained_instance, 4] retianed_labels: [num_retained_instance] retained_label_weights: [num_retained_instance] If multiclass_scores, masks, or keypoints are not None, the function also returns: retained_multiclass_scores: [num_retained_instance, num_classes] retained_masks: [num_retained_instance, height, width] retained_keypoints: [num_retained_instance, num_keypoints, 2] """ with tf.name_scope('RetainBoxesAboveThreshold', values=[boxes, labels, label_weights]): indices = tf.where( tf.logical_or(label_weights > threshold, tf.is_nan(label_weights))) indices = tf.squeeze(indices, axis=1) retained_boxes = tf.gather(boxes, indices) retained_labels = tf.gather(labels, indices) retained_label_weights = tf.gather(label_weights, indices) result = [retained_boxes, retained_labels, retained_label_weights] if label_confidences is not None: retained_label_confidences = tf.gather(label_confidences, indices) result.append(retained_label_confidences) if multiclass_scores is not None: retained_multiclass_scores = tf.gather(multiclass_scores, indices) result.append(retained_multiclass_scores) if masks is not None: retained_masks = tf.gather(masks, indices) result.append(retained_masks) if keypoints is not None: retained_keypoints = tf.gather(keypoints, indices) result.append(retained_keypoints) return result def _flip_boxes_left_right(boxes): """Left-right flip the boxes. Args: boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. Returns: Flipped boxes. """ ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1) flipped_xmin = tf.subtract(1.0, xmax) flipped_xmax = tf.subtract(1.0, xmin) flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1) return flipped_boxes def _flip_boxes_up_down(boxes): """Up-down flip the boxes. Args: boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. Returns: Flipped boxes. """ ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1) flipped_ymin = tf.subtract(1.0, ymax) flipped_ymax = tf.subtract(1.0, ymin) flipped_boxes = tf.concat([flipped_ymin, xmin, flipped_ymax, xmax], 1) return flipped_boxes def _rot90_boxes(boxes): """Rotate boxes counter-clockwise by 90 degrees. 
Args: boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. Returns: Rotated boxes. """ ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1) rotated_ymin = tf.subtract(1.0, xmax) rotated_ymax = tf.subtract(1.0, xmin) rotated_xmin = ymin rotated_xmax = ymax rotated_boxes = tf.concat( [rotated_ymin, rotated_xmin, rotated_ymax, rotated_xmax], 1) return rotated_boxes def _flip_masks_left_right(masks): """Left-right flip masks. Args: masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. Returns: flipped masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. """ return masks[:, :, ::-1] def _flip_masks_up_down(masks): """Up-down flip masks. Args: masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. Returns: flipped masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. """ return masks[:, ::-1, :] def _rot90_masks(masks): """Rotate masks counter-clockwise by 90 degrees. Args: masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. Returns: rotated masks: rank 3 float32 tensor with shape [num_instances, height, width] representing instance masks. """ masks = tf.transpose(masks, [0, 2, 1]) return masks[:, ::-1, :] def random_horizontal_flip(image, boxes=None, masks=None, keypoints=None, keypoint_flip_permutation=None, seed=None, preprocess_vars_cache=None): """Randomly flips the image and detections horizontally. The probability of flipping the image is 50%. Args: image: rank 3 float32 tensor with shape [height, width, channels]. boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the bounding boxes. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip permutation. seed: random seed preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. If boxes, masks, keypoints, and keypoint_flip_permutation are not None, the function also returns the following tensors. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] Raises: ValueError: if keypoints are provided but keypoint_flip_permutation is not. 
""" def _flip_image(image): # flip image image_flipped = tf.image.flip_left_right(image) return image_flipped if keypoints is not None and keypoint_flip_permutation is None: raise ValueError( 'keypoints are provided but keypoints_flip_permutation is not provided') with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]): result = [] # random variable defining whether to do flip or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_a_flip_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.HORIZONTAL_FLIP, preprocess_vars_cache) do_a_flip_random = tf.greater(do_a_flip_random, 0.5) # flip image image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) result.append(image) # flip boxes if boxes is not None: boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes), lambda: boxes) result.append(boxes) # flip masks if masks is not None: masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks), lambda: masks) result.append(masks) # flip keypoints if keypoints is not None and keypoint_flip_permutation is not None: permutation = keypoint_flip_permutation keypoints = tf.cond( do_a_flip_random, lambda: keypoint_ops.flip_horizontal(keypoints, 0.5, permutation), lambda: keypoints) result.append(keypoints) return tuple(result) def random_vertical_flip(image, boxes=None, masks=None, keypoints=None, keypoint_flip_permutation=None, seed=None, preprocess_vars_cache=None): """Randomly flips the image and detections vertically. The probability of flipping the image is 50%. Args: image: rank 3 float32 tensor with shape [height, width, channels]. boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the bounding boxes. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip permutation. seed: random seed preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. If boxes, masks, keypoints, and keypoint_flip_permutation are not None, the function also returns the following tensors. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] Raises: ValueError: if keypoints are provided but keypoint_flip_permutation is not. 
""" def _flip_image(image): # flip image image_flipped = tf.image.flip_up_down(image) return image_flipped if keypoints is not None and keypoint_flip_permutation is None: raise ValueError( 'keypoints are provided but keypoints_flip_permutation is not provided') with tf.name_scope('RandomVerticalFlip', values=[image, boxes]): result = [] # random variable defining whether to do flip or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_a_flip_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.VERTICAL_FLIP, preprocess_vars_cache) do_a_flip_random = tf.greater(do_a_flip_random, 0.5) # flip image image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image) result.append(image) # flip boxes if boxes is not None: boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_up_down(boxes), lambda: boxes) result.append(boxes) # flip masks if masks is not None: masks = tf.cond(do_a_flip_random, lambda: _flip_masks_up_down(masks), lambda: masks) result.append(masks) # flip keypoints if keypoints is not None and keypoint_flip_permutation is not None: permutation = keypoint_flip_permutation keypoints = tf.cond( do_a_flip_random, lambda: keypoint_ops.flip_vertical(keypoints, 0.5, permutation), lambda: keypoints) result.append(keypoints) return tuple(result) def random_rotation90(image, boxes=None, masks=None, keypoints=None, seed=None, preprocess_vars_cache=None): """Randomly rotates the image and detections 90 degrees counter-clockwise. The probability of rotating the image is 50%. This can be combined with random_horizontal_flip and random_vertical_flip to produce an output with a uniform distribution of the eight possible 90 degree rotation / reflection combinations. Args: image: rank 3 float32 tensor with shape [height, width, channels]. boxes: (optional) rank 2 float32 tensor with shape [N, 4] containing the bounding boxes. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. seed: random seed preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. If boxes, masks, and keypoints, are not None, the function also returns the following tensors. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. 
keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ def _rot90_image(image): # flip image image_rotated = tf.image.rot90(image) return image_rotated with tf.name_scope('RandomRotation90', values=[image, boxes]): result = [] # random variable defining whether to rotate by 90 degrees or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_a_rot90_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ROTATION90, preprocess_vars_cache) do_a_rot90_random = tf.greater(do_a_rot90_random, 0.5) # flip image image = tf.cond(do_a_rot90_random, lambda: _rot90_image(image), lambda: image) result.append(image) # flip boxes if boxes is not None: boxes = tf.cond(do_a_rot90_random, lambda: _rot90_boxes(boxes), lambda: boxes) result.append(boxes) # flip masks if masks is not None: masks = tf.cond(do_a_rot90_random, lambda: _rot90_masks(masks), lambda: masks) result.append(masks) # flip keypoints if keypoints is not None: keypoints = tf.cond( do_a_rot90_random, lambda: keypoint_ops.rot90(keypoints), lambda: keypoints) result.append(keypoints) return tuple(result) def random_pixel_value_scale(image, minval=0.9, maxval=1.1, seed=None, preprocess_vars_cache=None): """Scales each value in the pixels of the image. This function scales each pixel independent of the other ones. For each value in image tensor, draws a random number between minval and maxval and multiples the values with them. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. minval: lower ratio of scaling pixel values. maxval: upper ratio of scaling pixel values. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ with tf.name_scope('RandomPixelValueScale', values=[image]): generator_func = functools.partial( tf.random_uniform, tf.shape(image), minval=minval, maxval=maxval, dtype=tf.float32, seed=seed) color_coef = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.PIXEL_VALUE_SCALE, preprocess_vars_cache) image = tf.multiply(image, color_coef) image = tf.clip_by_value(image, 0.0, 255.0) return image def random_image_scale(image, masks=None, min_scale_ratio=0.5, max_scale_ratio=2.0, seed=None, preprocess_vars_cache=None): """Scales the image size. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels]. masks: (optional) rank 3 float32 tensor containing masks with size [height, width, num_masks]. The value is set to None if there are no masks. min_scale_ratio: minimum scaling ratio. max_scale_ratio: maximum scaling ratio. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. masks: If masks is not none, resized masks which are the same rank as input masks will be returned. 
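  Example usage (an illustrative sketch; the input shape and scale ratios are
  assumed values, not part of this module; note the single-element tuple
  returned when masks is None):
    image = tf.zeros([240, 320, 3], dtype=tf.float32)
    (scaled_image,) = random_image_scale(
        image, min_scale_ratio=0.8, max_scale_ratio=1.2, seed=7)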
""" with tf.name_scope('RandomImageScale', values=[image]): result = [] image_shape = tf.shape(image) image_height = image_shape[0] image_width = image_shape[1] generator_func = functools.partial( tf.random_uniform, [], minval=min_scale_ratio, maxval=max_scale_ratio, dtype=tf.float32, seed=seed) size_coef = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.IMAGE_SCALE, preprocess_vars_cache) image_newysize = tf.to_int32( tf.multiply(tf.to_float(image_height), size_coef)) image_newxsize = tf.to_int32( tf.multiply(tf.to_float(image_width), size_coef)) image = tf.image.resize_images( image, [image_newysize, image_newxsize], align_corners=True) result.append(image) if masks is not None: masks = tf.image.resize_images( masks, [image_newysize, image_newxsize], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=True) result.append(masks) return tuple(result) def random_rgb_to_gray(image, probability=0.1, seed=None, preprocess_vars_cache=None): """Changes the image from RGB to Grayscale with the given probability. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. probability: the probability of returning a grayscale image. The probability should be a number between [0, 1]. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ def _image_to_gray(image): image_gray1 = _rgb_to_grayscale(image) image_gray3 = tf.image.grayscale_to_rgb(image_gray1) return image_gray3 with tf.name_scope('RandomRGBtoGray', values=[image]): # random variable defining whether to change to grayscale or not generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_gray_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.RGB_TO_GRAY, preprocess_vars_cache) image = tf.cond( tf.greater(do_gray_random, probability), lambda: image, lambda: _image_to_gray(image)) return image def random_adjust_brightness(image, max_delta=0.2, seed=None, preprocess_vars_cache=None): """Randomly adjusts brightness. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. max_delta: how much to change the brightness. A value between [0, 1). seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. boxes: boxes which is the same shape as input boxes. """ with tf.name_scope('RandomAdjustBrightness', values=[image]): generator_func = functools.partial(tf.random_uniform, [], -max_delta, max_delta, seed=seed) delta = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADJUST_BRIGHTNESS, preprocess_vars_cache) image = tf.image.adjust_brightness(image / 255, delta) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image def random_adjust_contrast(image, min_delta=0.8, max_delta=1.25, seed=None, preprocess_vars_cache=None): """Randomly adjusts contrast. 
Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. min_delta: see max_delta. max_delta: how much to change the contrast. Contrast will change with a value between min_delta and max_delta. This value will be multiplied to the current contrast of the image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ with tf.name_scope('RandomAdjustContrast', values=[image]): generator_func = functools.partial(tf.random_uniform, [], min_delta, max_delta, seed=seed) contrast_factor = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADJUST_CONTRAST, preprocess_vars_cache) image = tf.image.adjust_contrast(image / 255, contrast_factor) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image def random_adjust_hue(image, max_delta=0.02, seed=None, preprocess_vars_cache=None): """Randomly adjusts hue. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. max_delta: change hue randomly with a value between 0 and max_delta. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. """ with tf.name_scope('RandomAdjustHue', values=[image]): generator_func = functools.partial(tf.random_uniform, [], -max_delta, max_delta, seed=seed) delta = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADJUST_HUE, preprocess_vars_cache) image = tf.image.adjust_hue(image / 255, delta) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image def random_adjust_saturation(image, min_delta=0.8, max_delta=1.25, seed=None, preprocess_vars_cache=None): """Randomly adjusts saturation. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. min_delta: see max_delta. max_delta: how much to change the saturation. Saturation will change with a value between min_delta and max_delta. This value will be multiplied to the current saturation of the image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. 
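  Example usage (an illustrative sketch; the input tensor is an assumed
  placeholder for a real image in [0, 255]):
    image = tf.random_uniform([224, 224, 3], maxval=255.0)
    image = random_adjust_saturation(image, min_delta=0.9, max_delta=1.1)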
""" with tf.name_scope('RandomAdjustSaturation', values=[image]): generator_func = functools.partial(tf.random_uniform, [], min_delta, max_delta, seed=seed) saturation_factor = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADJUST_SATURATION, preprocess_vars_cache) image = tf.image.adjust_saturation(image / 255, saturation_factor) * 255 image = tf.clip_by_value(image, clip_value_min=0.0, clip_value_max=255.0) return image def random_distort_color(image, color_ordering=0, preprocess_vars_cache=None): """Randomly distorts color. Randomly distorts color using a combination of brightness, hue, contrast and saturation changes. Makes sure the output image is still between 0 and 255. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 255]. color_ordering: Python int, a type of distortion (valid values: 0, 1). preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same shape as input image. Raises: ValueError: if color_ordering is not in {0, 1}. """ with tf.name_scope('RandomDistortColor', values=[image]): if color_ordering == 0: image = random_adjust_brightness( image, max_delta=32. / 255., preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_saturation( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_hue( image, max_delta=0.2, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_contrast( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) elif color_ordering == 1: image = random_adjust_brightness( image, max_delta=32. / 255., preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_contrast( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_saturation( image, min_delta=0.5, max_delta=1.5, preprocess_vars_cache=preprocess_vars_cache) image = random_adjust_hue( image, max_delta=0.2, preprocess_vars_cache=preprocess_vars_cache) else: raise ValueError('color_ordering must be in {0, 1}') return image def random_jitter_boxes(boxes, ratio=0.05, seed=None): """Randomly jitter boxes in image. Args: boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. ratio: The ratio of the box width and height that the corners can jitter. For example if the width is 100 pixels and ratio is 0.05, the corners can jitter up to 5 pixels in the x direction. seed: random seed. Returns: boxes: boxes which is the same shape as input boxes. """ def random_jitter_box(box, ratio, seed): """Randomly jitter box. Args: box: bounding box [1, 1, 4]. ratio: max ratio between jittered box and original box, a number between [0, 0.5]. seed: random seed. Returns: jittered_box: jittered box. 
""" rand_numbers = tf.random_uniform( [1, 1, 4], minval=-ratio, maxval=ratio, dtype=tf.float32, seed=seed) box_width = tf.subtract(box[0, 0, 3], box[0, 0, 1]) box_height = tf.subtract(box[0, 0, 2], box[0, 0, 0]) hw_coefs = tf.stack([box_height, box_width, box_height, box_width]) hw_rand_coefs = tf.multiply(hw_coefs, rand_numbers) jittered_box = tf.add(box, hw_rand_coefs) jittered_box = tf.clip_by_value(jittered_box, 0.0, 1.0) return jittered_box with tf.name_scope('RandomJitterBoxes', values=[boxes]): # boxes are [N, 4]. Lets first make them [N, 1, 1, 4] boxes_shape = tf.shape(boxes) boxes = tf.expand_dims(boxes, 1) boxes = tf.expand_dims(boxes, 2) distorted_boxes = tf.map_fn( lambda x: random_jitter_box(x, ratio, seed), boxes, dtype=tf.float32) distorted_boxes = tf.reshape(distorted_boxes, boxes_shape) return distorted_boxes def _strict_random_crop_image(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=1.0, aspect_ratio_range=(0.75, 1.33), area_range=(0.1, 1.0), overlap_thresh=0.3, clip_boxes=True, preprocess_vars_cache=None): """Performs random crop. Note: Keypoint coordinates that are outside the crop will be set to NaN, which is consistent with the original keypoint encoding for non-existing keypoints. This function always crops the image and is supposed to be used by `random_crop_image` function which sometimes returns the image unchanged. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes with shape [num_instances, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If label_weights, multiclass_scores, masks, or keypoints is not None, the function also returns: label_weights: rank 1 float32 tensor with shape [num_instances]. 
multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ with tf.name_scope('RandomCropImage', values=[image, boxes]): image_shape = tf.shape(image) # boxes are [N, 4]. Lets first make them [N, 1, 4]. boxes_expanded = tf.expand_dims( tf.clip_by_value( boxes, clip_value_min=0.0, clip_value_max=1.0), 1) generator_func = functools.partial( tf.image.sample_distorted_bounding_box, image_shape, bounding_boxes=boxes_expanded, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=100, use_image_if_no_bounding_boxes=True) # for ssd cropping, each value of min_object_covered has its own # cached random variable sample_distorted_bounding_box = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.STRICT_CROP_IMAGE, preprocess_vars_cache, key=min_object_covered) im_box_begin, im_box_size, im_box = sample_distorted_bounding_box new_image = tf.slice(image, im_box_begin, im_box_size) new_image.set_shape([None, None, image.get_shape()[2]]) # [1, 4] im_box_rank2 = tf.squeeze(im_box, squeeze_dims=[0]) # [4] im_box_rank1 = tf.squeeze(im_box) boxlist = box_list.BoxList(boxes) boxlist.add_field('labels', labels) if label_weights is not None: boxlist.add_field('label_weights', label_weights) if label_confidences is not None: boxlist.add_field('label_confidences', label_confidences) if multiclass_scores is not None: boxlist.add_field('multiclass_scores', multiclass_scores) im_boxlist = box_list.BoxList(im_box_rank2) # remove boxes that are outside cropped image boxlist, inside_window_ids = box_list_ops.prune_completely_outside_window( boxlist, im_box_rank1) # remove boxes that are outside image overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes( boxlist, im_boxlist, overlap_thresh) # change the coordinate of the remaining boxes new_labels = overlapping_boxlist.get_field('labels') new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist, im_box_rank1) new_boxes = new_boxlist.get() if clip_boxes: new_boxes = tf.clip_by_value( new_boxes, clip_value_min=0.0, clip_value_max=1.0) result = [new_image, new_boxes, new_labels] if label_weights is not None: new_label_weights = overlapping_boxlist.get_field('label_weights') result.append(new_label_weights) if label_confidences is not None: new_label_confidences = overlapping_boxlist.get_field('label_confidences') result.append(new_label_confidences) if multiclass_scores is not None: new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores') result.append(new_multiclass_scores) if masks is not None: masks_of_boxes_inside_window = tf.gather(masks, inside_window_ids) masks_of_boxes_completely_inside_window = tf.gather( masks_of_boxes_inside_window, keep_ids) masks_box_begin = [0, im_box_begin[0], im_box_begin[1]] masks_box_size = [-1, im_box_size[0], im_box_size[1]] new_masks = tf.slice( masks_of_boxes_completely_inside_window, masks_box_begin, masks_box_size) result.append(new_masks) if keypoints is not None: keypoints_of_boxes_inside_window = tf.gather(keypoints, inside_window_ids) keypoints_of_boxes_completely_inside_window = tf.gather( keypoints_of_boxes_inside_window, keep_ids) new_keypoints = keypoint_ops.change_coordinate_frame( keypoints_of_boxes_completely_inside_window, im_box_rank1) if clip_boxes: new_keypoints = 
keypoint_ops.prune_outside_window(new_keypoints, [0.0, 0.0, 1.0, 1.0]) result.append(new_keypoints) return tuple(result) def random_crop_image(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=1.0, aspect_ratio_range=(0.75, 1.33), area_range=(0.1, 1.0), overlap_thresh=0.3, clip_boxes=True, random_coef=0.0, seed=None, preprocess_vars_cache=None): """Randomly crops the image. Given the input image and its bounding boxes, this op randomly crops a subimage. Given a user-provided set of input constraints, the crop window is resampled until it satisfies these constraints. If within 100 trials it is unable to find a valid crop, the original image is returned. See the Args section for a description of the input constraints. Both input boxes and returned Boxes are in normalized form (e.g., lie in the unit square [0, 1]). This function will return the original image with probability random_coef. Note: Keypoint coordinates that are outside the crop will be set to NaN, which is consistent with the original keypoint encoding for non-existing keypoints. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes with shape [num_instances, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances]. representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: Image shape will be [new_height, new_width, channels]. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If label_weights, multiclass_scores, masks, or keypoints is not None, the function also returns: label_weights: rank 1 float32 tensor with shape [num_instances]. 
multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ def strict_random_crop_image_fn(): return _strict_random_crop_image( image, boxes, labels, label_weights, label_confidences=label_confidences, multiclass_scores=multiclass_scores, masks=masks, keypoints=keypoints, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, overlap_thresh=overlap_thresh, clip_boxes=clip_boxes, preprocess_vars_cache=preprocess_vars_cache) # avoids tf.cond to make faster RCNN training on borg. See b/140057645. if random_coef < sys.float_info.min: result = strict_random_crop_image_fn() else: generator_func = functools.partial(tf.random_uniform, [], seed=seed) do_a_crop_random = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.CROP_IMAGE, preprocess_vars_cache) do_a_crop_random = tf.greater(do_a_crop_random, random_coef) outputs = [image, boxes, labels] if label_weights is not None: outputs.append(label_weights) if label_confidences is not None: outputs.append(label_confidences) if multiclass_scores is not None: outputs.append(multiclass_scores) if masks is not None: outputs.append(masks) if keypoints is not None: outputs.append(keypoints) result = tf.cond(do_a_crop_random, strict_random_crop_image_fn, lambda: tuple(outputs)) return result def random_pad_image(image, boxes, min_image_size=None, max_image_size=None, pad_color=None, seed=None, preprocess_vars_cache=None): """Randomly pads the image. This function randomly pads the image with zeros. The final size of the padded image will be between min_image_size and max_image_size. if min_image_size is smaller than the input image size, min_image_size will be set to the input image size. The same for max_image_size. The input image will be located at a uniformly random location inside the padded image. The relative location of the boxes to the original image will remain the same. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. min_image_size: a tensor of size [min_height, min_width], type tf.int32. If passed as None, will be set to image size [height, width]. max_image_size: a tensor of size [max_height, max_width], type tf.int32. If passed as None, will be set to twice the image [height * 2, width * 2]. pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. if set as None, it will be set to average color of the input image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: Image shape will be [new_height, new_width, channels]. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. 
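  Example usage (an illustrative sketch; the image size and padding bound
  below are assumed values, not part of this module):
    image = tf.zeros([300, 300, 3], dtype=tf.float32)
    boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]], dtype=tf.float32)
    padded_image, padded_boxes = random_pad_image(
        image, boxes, max_image_size=tf.constant([600, 600]), seed=7)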
""" if pad_color is None: pad_color = tf.reduce_mean(image, axis=[0, 1]) image_shape = tf.shape(image) image_height = image_shape[0] image_width = image_shape[1] if max_image_size is None: max_image_size = tf.stack([image_height * 2, image_width * 2]) max_image_size = tf.maximum(max_image_size, tf.stack([image_height, image_width])) if min_image_size is None: min_image_size = tf.stack([image_height, image_width]) min_image_size = tf.maximum(min_image_size, tf.stack([image_height, image_width])) target_height = tf.cond( max_image_size[0] > min_image_size[0], lambda: _random_integer(min_image_size[0], max_image_size[0], seed), lambda: max_image_size[0]) target_width = tf.cond( max_image_size[1] > min_image_size[1], lambda: _random_integer(min_image_size[1], max_image_size[1], seed), lambda: max_image_size[1]) offset_height = tf.cond( target_height > image_height, lambda: _random_integer(0, target_height - image_height, seed), lambda: tf.constant(0, dtype=tf.int32)) offset_width = tf.cond( target_width > image_width, lambda: _random_integer(0, target_width - image_width, seed), lambda: tf.constant(0, dtype=tf.int32)) gen_func = lambda: (target_height, target_width, offset_height, offset_width) params = _get_or_create_preprocess_rand_vars( gen_func, preprocessor_cache.PreprocessorCache.PAD_IMAGE, preprocess_vars_cache) target_height, target_width, offset_height, offset_width = params new_image = tf.image.pad_to_bounding_box( image, offset_height=offset_height, offset_width=offset_width, target_height=target_height, target_width=target_width) # Setting color of the padded pixels image_ones = tf.ones_like(image) image_ones_padded = tf.image.pad_to_bounding_box( image_ones, offset_height=offset_height, offset_width=offset_width, target_height=target_height, target_width=target_width) image_color_padded = (1.0 - image_ones_padded) * pad_color new_image += image_color_padded # setting boxes new_window = tf.to_float( tf.stack([ -offset_height, -offset_width, target_height - offset_height, target_width - offset_width ])) new_window /= tf.to_float( tf.stack([image_height, image_width, image_height, image_width])) boxlist = box_list.BoxList(boxes) new_boxlist = box_list_ops.change_coordinate_frame(boxlist, new_window) new_boxes = new_boxlist.get() return new_image, new_boxes def random_crop_pad_image(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, min_object_covered=1.0, aspect_ratio_range=(0.75, 1.33), area_range=(0.1, 1.0), overlap_thresh=0.3, clip_boxes=True, random_coef=0.0, min_padded_size_ratio=(1.0, 1.0), max_padded_size_ratio=(2.0, 2.0), pad_color=None, seed=None, preprocess_vars_cache=None): """Randomly crops and pads the image. Given an input image and its bounding boxes, this op first randomly crops the image and then randomly pads the image with background values. Parameters min_padded_size_ratio and max_padded_size_ratio, determine the range of the final output image size. Specifically, the final image size will have a size in the range of min_padded_size_ratio * tf.shape(image) and max_padded_size_ratio * tf.shape(image). Note that these ratios are with respect to the size of the original image, so we can't capture the same effect easily by independently applying RandomCropImage followed by RandomPadImage. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. 
Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: rank 1 float32 containing the label weights. label_confidences: rank 1 float32 containing the label confidences. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. min_padded_size_ratio: min ratio of padded image height and width to the input image's height and width. max_padded_size_ratio: max ratio of padded image height and width to the input image's height and width. pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. if set as None, it will be set to average color of the randomly cropped image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: padded_image: padded image. padded_boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. cropped_labels: cropped labels. if label_weights is not None also returns: cropped_label_weights: cropped label weights. if multiclass_scores is not None also returns: cropped_multiclass_scores: cropped_multiclass_scores. 
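  Example usage (an illustrative sketch; all tensors below are assumed toy
  inputs with pixel values in [0, 1], not part of this module):
    image = tf.random_uniform([300, 300, 3])
    boxes = tf.constant([[0.1, 0.1, 0.8, 0.8]], dtype=tf.float32)
    labels = tf.constant([1], dtype=tf.int32)
    label_weights = tf.constant([1.0], dtype=tf.float32)
    (padded_image, padded_boxes, cropped_labels,
     cropped_label_weights) = random_crop_pad_image(
         image, boxes, labels, label_weights, seed=7)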
""" image_size = tf.shape(image) image_height = image_size[0] image_width = image_size[1] result = random_crop_image( image=image, boxes=boxes, labels=labels, label_weights=label_weights, label_confidences=label_confidences, multiclass_scores=multiclass_scores, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, overlap_thresh=overlap_thresh, clip_boxes=clip_boxes, random_coef=random_coef, seed=seed, preprocess_vars_cache=preprocess_vars_cache) cropped_image, cropped_boxes, cropped_labels = result[:3] min_image_size = tf.to_int32( tf.to_float(tf.stack([image_height, image_width])) * min_padded_size_ratio) max_image_size = tf.to_int32( tf.to_float(tf.stack([image_height, image_width])) * max_padded_size_ratio) padded_image, padded_boxes = random_pad_image( cropped_image, cropped_boxes, min_image_size=min_image_size, max_image_size=max_image_size, pad_color=pad_color, seed=seed, preprocess_vars_cache=preprocess_vars_cache) cropped_padded_output = (padded_image, padded_boxes, cropped_labels) index = 3 if label_weights is not None: cropped_label_weights = result[index] cropped_padded_output += (cropped_label_weights,) index += 1 if label_confidences is not None: cropped_label_confidences = result[index] cropped_padded_output += (cropped_label_confidences,) index += 1 if multiclass_scores is not None: cropped_multiclass_scores = result[index] cropped_padded_output += (cropped_multiclass_scores,) return cropped_padded_output def random_crop_to_aspect_ratio(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, aspect_ratio=1.0, overlap_thresh=0.3, clip_boxes=True, seed=None, preprocess_vars_cache=None): """Randomly crops an image to the specified aspect ratio. Randomly crops the a portion of the image such that the crop is of the specified aspect ratio, and the crop is as large as possible. If the specified aspect ratio is larger than the aspect ratio of the image, this op will randomly remove rows from the top and bottom of the image. If the specified aspect ratio is less than the aspect ratio of the image, this op will randomly remove cols from the left and right of the image. If the specified aspect ratio is the same as the aspect ratio of the image, this op will return the image. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. aspect_ratio: the aspect ratio of cropped image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. 
seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If label_weights, masks, keypoints, or multiclass_scores is not None, the function also returns: label_weights: rank 1 float32 tensor with shape [num_instances]. masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] Raises: ValueError: If image is not a 3D tensor. """ if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('RandomCropToAspectRatio', values=[image]): image_shape = tf.shape(image) orig_height = image_shape[0] orig_width = image_shape[1] orig_aspect_ratio = tf.to_float(orig_width) / tf.to_float(orig_height) new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32) def target_height_fn(): return tf.to_int32(tf.round(tf.to_float(orig_width) / new_aspect_ratio)) target_height = tf.cond(orig_aspect_ratio >= new_aspect_ratio, lambda: orig_height, target_height_fn) def target_width_fn(): return tf.to_int32(tf.round(tf.to_float(orig_height) * new_aspect_ratio)) target_width = tf.cond(orig_aspect_ratio <= new_aspect_ratio, lambda: orig_width, target_width_fn) # either offset_height = 0 and offset_width is randomly chosen from # [0, offset_width - target_width), or else offset_width = 0 and # offset_height is randomly chosen from [0, offset_height - target_height) offset_height = _random_integer(0, orig_height - target_height + 1, seed) offset_width = _random_integer(0, orig_width - target_width + 1, seed) generator_func = lambda: (offset_height, offset_width) offset_height, offset_width = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.CROP_TO_ASPECT_RATIO, preprocess_vars_cache) new_image = tf.image.crop_to_bounding_box( image, offset_height, offset_width, target_height, target_width) im_box = tf.stack([ tf.to_float(offset_height) / tf.to_float(orig_height), tf.to_float(offset_width) / tf.to_float(orig_width), tf.to_float(offset_height + target_height) / tf.to_float(orig_height), tf.to_float(offset_width + target_width) / tf.to_float(orig_width) ]) boxlist = box_list.BoxList(boxes) boxlist.add_field('labels', labels) boxlist.add_field('label_weights', label_weights) if label_confidences is not None: boxlist.add_field('label_confidences', label_confidences) if multiclass_scores is not None: boxlist.add_field('multiclass_scores', multiclass_scores) im_boxlist = box_list.BoxList(tf.expand_dims(im_box, 0)) # remove boxes whose overlap with the image is less than overlap_thresh overlapping_boxlist, keep_ids = box_list_ops.prune_non_overlapping_boxes( boxlist, im_boxlist, overlap_thresh) # change the coordinate of the remaining boxes new_labels = overlapping_boxlist.get_field('labels') new_boxlist = box_list_ops.change_coordinate_frame(overlapping_boxlist, im_box) if clip_boxes: new_boxlist = box_list_ops.clip_to_window( new_boxlist, tf.constant([0.0, 0.0, 1.0, 1.0], tf.float32)) new_boxes = new_boxlist.get() result = [new_image, new_boxes, new_labels] new_label_weights = 
overlapping_boxlist.get_field('label_weights') result.append(new_label_weights) if label_confidences is not None: new_label_confidences = ( overlapping_boxlist.get_field('label_confidences')) result.append(new_label_confidences) if multiclass_scores is not None: new_multiclass_scores = overlapping_boxlist.get_field('multiclass_scores') result.append(new_multiclass_scores) if masks is not None: masks_inside_window = tf.gather(masks, keep_ids) masks_box_begin = tf.stack([0, offset_height, offset_width]) masks_box_size = tf.stack([-1, target_height, target_width]) new_masks = tf.slice(masks_inside_window, masks_box_begin, masks_box_size) result.append(new_masks) if keypoints is not None: keypoints_inside_window = tf.gather(keypoints, keep_ids) new_keypoints = keypoint_ops.change_coordinate_frame( keypoints_inside_window, im_box) if clip_boxes: new_keypoints = keypoint_ops.prune_outside_window(new_keypoints, [0.0, 0.0, 1.0, 1.0]) result.append(new_keypoints) return tuple(result) def random_pad_to_aspect_ratio(image, boxes, masks=None, keypoints=None, aspect_ratio=1.0, min_padded_size_ratio=(1.0, 1.0), max_padded_size_ratio=(2.0, 2.0), seed=None, preprocess_vars_cache=None): """Randomly zero pads an image to the specified aspect ratio. Pads the image so that the resulting image will have the specified aspect ratio without scaling less than the min_padded_size_ratio or more than the max_padded_size_ratio. If the min_padded_size_ratio or max_padded_size_ratio is lower than what is possible to maintain the aspect ratio, then this method will use the least padding to achieve the specified aspect ratio. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. aspect_ratio: aspect ratio of the final image. min_padded_size_ratio: min ratio of padded image height and width to the input image's height and width. max_padded_size_ratio: max ratio of padded image height and width to the input image's height and width. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If masks, or keypoints is not None, the function also returns: masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] Raises: ValueError: If image is not a 3D tensor. 
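  Example usage (an illustrative sketch; the input shape and target aspect
  ratio below are assumed values, not part of this module):
    image = tf.zeros([200, 400, 3], dtype=tf.float32)
    boxes = tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32)
    padded_image, padded_boxes = random_pad_to_aspect_ratio(
        image, boxes, aspect_ratio=1.0, seed=7)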
""" if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('RandomPadToAspectRatio', values=[image]): image_shape = tf.shape(image) image_height = tf.to_float(image_shape[0]) image_width = tf.to_float(image_shape[1]) image_aspect_ratio = image_width / image_height new_aspect_ratio = tf.constant(aspect_ratio, dtype=tf.float32) target_height = tf.cond( image_aspect_ratio <= new_aspect_ratio, lambda: image_height, lambda: image_width / new_aspect_ratio) target_width = tf.cond( image_aspect_ratio >= new_aspect_ratio, lambda: image_width, lambda: image_height * new_aspect_ratio) min_height = tf.maximum( min_padded_size_ratio[0] * image_height, target_height) min_width = tf.maximum( min_padded_size_ratio[1] * image_width, target_width) max_height = tf.maximum( max_padded_size_ratio[0] * image_height, target_height) max_width = tf.maximum( max_padded_size_ratio[1] * image_width, target_width) max_scale = tf.minimum(max_height / target_height, max_width / target_width) min_scale = tf.minimum( max_scale, tf.maximum(min_height / target_height, min_width / target_width)) generator_func = functools.partial(tf.random_uniform, [], min_scale, max_scale, seed=seed) scale = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.PAD_TO_ASPECT_RATIO, preprocess_vars_cache) target_height = tf.round(scale * target_height) target_width = tf.round(scale * target_width) new_image = tf.image.pad_to_bounding_box( image, 0, 0, tf.to_int32(target_height), tf.to_int32(target_width)) im_box = tf.stack([ 0.0, 0.0, target_height / image_height, target_width / image_width ]) boxlist = box_list.BoxList(boxes) new_boxlist = box_list_ops.change_coordinate_frame(boxlist, im_box) new_boxes = new_boxlist.get() result = [new_image, new_boxes] if masks is not None: new_masks = tf.expand_dims(masks, -1) new_masks = tf.image.pad_to_bounding_box(new_masks, 0, 0, tf.to_int32(target_height), tf.to_int32(target_width)) new_masks = tf.squeeze(new_masks, [-1]) result.append(new_masks) if keypoints is not None: new_keypoints = keypoint_ops.change_coordinate_frame(keypoints, im_box) result.append(new_keypoints) return tuple(result) def random_black_patches(image, max_black_patches=10, probability=0.5, size_to_image_ratio=0.1, random_seed=None, preprocess_vars_cache=None): """Randomly adds some black patches to the image. This op adds up to max_black_patches square black patches of a fixed size to the image where size is specified via the size_to_image_ratio parameter. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. max_black_patches: number of times that the function tries to add a black box to the image. probability: at each try, what is the chance of adding a box. size_to_image_ratio: Determines the ratio of the size of the black patches to the size of the image. box_size = size_to_image_ratio * min(image_width, image_height) random_seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image """ def add_black_patch_to_image(image, idx): """Function for adding one patch to the image. 
Args: image: image idx: counter for number of patches that could have been added Returns: image with a randomly added black box """ image_shape = tf.shape(image) image_height = image_shape[0] image_width = image_shape[1] box_size = tf.to_int32( tf.multiply( tf.minimum(tf.to_float(image_height), tf.to_float(image_width)), size_to_image_ratio)) generator_func = functools.partial(tf.random_uniform, [], minval=0.0, maxval=(1.0 - size_to_image_ratio), seed=random_seed) normalized_y_min = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH, preprocess_vars_cache, key=str(idx) + 'y') normalized_x_min = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.ADD_BLACK_PATCH, preprocess_vars_cache, key=str(idx) + 'x') y_min = tf.to_int32(normalized_y_min * tf.to_float(image_height)) x_min = tf.to_int32(normalized_x_min * tf.to_float(image_width)) black_box = tf.ones([box_size, box_size, 3], dtype=tf.float32) mask = 1.0 - tf.image.pad_to_bounding_box(black_box, y_min, x_min, image_height, image_width) image = tf.multiply(image, mask) return image with tf.name_scope('RandomBlackPatchInImage', values=[image]): for idx in range(max_black_patches): generator_func = functools.partial(tf.random_uniform, [], minval=0.0, maxval=1.0, dtype=tf.float32, seed=random_seed) random_prob = _get_or_create_preprocess_rand_vars( generator_func, preprocessor_cache.PreprocessorCache.BLACK_PATCHES, preprocess_vars_cache, key=idx) image = tf.cond( tf.greater(random_prob, probability), lambda: image, functools.partial(add_black_patch_to_image, image=image, idx=idx)) return image def image_to_float(image): """Used in Faster R-CNN. Casts image pixel values to float. Args: image: input image which might be in tf.uint8 or sth else format Returns: image: image in tf.float32 format. """ with tf.name_scope('ImageToFloat', values=[image]): image = tf.to_float(image) return image def random_resize_method(image, target_size, preprocess_vars_cache=None): """Uses a random resize method to resize the image to target size. Args: image: a rank 3 tensor. target_size: a list of [target_height, target_width] preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: resized image. """ resized_image = _apply_with_random_selector( image, lambda x, method: tf.image.resize_images(x, target_size, method), num_cases=4, preprocess_vars_cache=preprocess_vars_cache, key=preprocessor_cache.PreprocessorCache.RESIZE_METHOD) return resized_image def _compute_new_static_size(image, min_dimension, max_dimension): """Compute new static shape for resize_to_range method.""" image_shape = image.get_shape().as_list() orig_height = image_shape[0] orig_width = image_shape[1] num_channels = image_shape[2] orig_min_dim = min(orig_height, orig_width) # Calculates the larger of the possible sizes large_scale_factor = min_dimension / float(orig_min_dim) # Scaling orig_(height|width) by large_scale_factor will make the smaller # dimension equal to min_dimension, save for floating point rounding errors. # For reasonably-sized images, taking the nearest integer will reliably # eliminate this error. 
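  # Illustrative example (assumed numbers, not from this module): for a
  # 480x640 image with min_dimension=600, large_scale_factor = 600 / 480
  # = 1.25, so large_size = [600, 800].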
large_height = int(round(orig_height * large_scale_factor)) large_width = int(round(orig_width * large_scale_factor)) large_size = [large_height, large_width] if max_dimension: # Calculates the smaller of the possible sizes, use that if the larger # is too big. orig_max_dim = max(orig_height, orig_width) small_scale_factor = max_dimension / float(orig_max_dim) # Scaling orig_(height|width) by small_scale_factor will make the larger # dimension equal to max_dimension, save for floating point rounding # errors. For reasonably-sized images, taking the nearest integer will # reliably eliminate this error. small_height = int(round(orig_height * small_scale_factor)) small_width = int(round(orig_width * small_scale_factor)) small_size = [small_height, small_width] new_size = large_size if max(large_size) > max_dimension: new_size = small_size else: new_size = large_size return tf.constant(new_size + [num_channels]) def _compute_new_dynamic_size(image, min_dimension, max_dimension): """Compute new dynamic shape for resize_to_range method.""" image_shape = tf.shape(image) orig_height = tf.to_float(image_shape[0]) orig_width = tf.to_float(image_shape[1]) num_channels = image_shape[2] orig_min_dim = tf.minimum(orig_height, orig_width) # Calculates the larger of the possible sizes min_dimension = tf.constant(min_dimension, dtype=tf.float32) large_scale_factor = min_dimension / orig_min_dim # Scaling orig_(height|width) by large_scale_factor will make the smaller # dimension equal to min_dimension, save for floating point rounding errors. # For reasonably-sized images, taking the nearest integer will reliably # eliminate this error. large_height = tf.to_int32(tf.round(orig_height * large_scale_factor)) large_width = tf.to_int32(tf.round(orig_width * large_scale_factor)) large_size = tf.stack([large_height, large_width]) if max_dimension: # Calculates the smaller of the possible sizes, use that if the larger # is too big. orig_max_dim = tf.maximum(orig_height, orig_width) max_dimension = tf.constant(max_dimension, dtype=tf.float32) small_scale_factor = max_dimension / orig_max_dim # Scaling orig_(height|width) by small_scale_factor will make the larger # dimension equal to max_dimension, save for floating point rounding # errors. For reasonably-sized images, taking the nearest integer will # reliably eliminate this error. small_height = tf.to_int32(tf.round(orig_height * small_scale_factor)) small_width = tf.to_int32(tf.round(orig_width * small_scale_factor)) small_size = tf.stack([small_height, small_width]) new_size = tf.cond( tf.to_float(tf.reduce_max(large_size)) > max_dimension, lambda: small_size, lambda: large_size) else: new_size = large_size return tf.stack(tf.unstack(new_size) + [num_channels]) def resize_to_range(image, masks=None, min_dimension=None, max_dimension=None, method=tf.image.ResizeMethod.BILINEAR, align_corners=False, pad_to_max_dimension=False, per_channel_pad_value=(0, 0, 0)): """Resizes an image so its dimensions are within the provided value. The output size can be described by two cases: 1. If the image can be rescaled so its minimum dimension is equal to the provided value without the other dimension exceeding max_dimension, then do so. 2. Otherwise, resize so the largest dimension is equal to max_dimension. Args: image: A 3D tensor of shape [height, width, channels] masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. min_dimension: (optional) (scalar) desired size of the smaller image dimension. 
max_dimension: (optional) (scalar) maximum allowed size of the larger image dimension. method: (optional) interpolation method used in resizing. Defaults to BILINEAR. align_corners: bool. If true, exactly align all 4 corners of the input and output. Defaults to False. pad_to_max_dimension: Whether to resize the image and pad it with zeros so the resulting image is of the spatial size [max_dimension, max_dimension]. If masks are included they are padded similarly. per_channel_pad_value: A tuple of per-channel scalar value to use for padding. By default pads zeros. Returns: Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A 3D tensor of shape [new_height, new_width, channels], where the image has been resized (with bilinear interpolation) so that min(new_height, new_width) == min_dimension or max(new_height, new_width) == max_dimension. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width]. resized_image_shape: A 1D tensor of shape [3] containing shape of the resized image. Raises: ValueError: if the image is not a 3D tensor. """ if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('ResizeToRange', values=[image, min_dimension]): if image.get_shape().is_fully_defined(): new_size = _compute_new_static_size(image, min_dimension, max_dimension) else: new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension) new_image = tf.image.resize_images( image, new_size[:-1], method=method, align_corners=align_corners) if pad_to_max_dimension: channels = tf.unstack(new_image, axis=2) if len(channels) != len(per_channel_pad_value): raise ValueError('Number of channels must be equal to the length of ' 'per-channel pad value.') new_image = tf.stack( [ tf.pad( channels[i], [[0, max_dimension - new_size[0]], [0, max_dimension - new_size[1]]], constant_values=per_channel_pad_value[i]) for i in range(len(channels)) ], axis=2) new_image.set_shape([max_dimension, max_dimension, 3]) result = [new_image] if masks is not None: new_masks = tf.expand_dims(masks, 3) new_masks = tf.image.resize_images( new_masks, new_size[:-1], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR, align_corners=align_corners) if pad_to_max_dimension: new_masks = tf.image.pad_to_bounding_box( new_masks, 0, 0, max_dimension, max_dimension) new_masks = tf.squeeze(new_masks, 3) result.append(new_masks) result.append(new_size) return result # TODO(alirezafathi): Make sure the static shapes are preserved. def resize_to_min_dimension(image, masks=None, min_dimension=600): """Resizes image and masks given the min size maintaining the aspect ratio. If one of the image dimensions is smaller that min_dimension, it will scale the image such that its smallest dimension is equal to min_dimension. Otherwise, will keep the image size as is. Args: image: a tensor of size [height, width, channels]. masks: (optional) a tensors of size [num_instances, height, width]. min_dimension: minimum image dimension. Returns: Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A tensor of size [new_height, new_width, channels]. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width] resized_image_shape: A 1D tensor of shape [3] containing the shape of the resized image. Raises: ValueError: if the image is not a 3D tensor. 
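  Example usage (an illustrative sketch; the input size and min_dimension
  below are assumed values, not part of this module):
    image = tf.zeros([400, 800, 3], dtype=tf.float32)
    resized_image, resized_image_shape = resize_to_min_dimension(
        image, min_dimension=600)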
""" if len(image.get_shape()) != 3: raise ValueError('Image should be 3D tensor') with tf.name_scope('ResizeGivenMinDimension', values=[image, min_dimension]): image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] num_channels = tf.shape(image)[2] min_image_dimension = tf.minimum(image_height, image_width) min_target_dimension = tf.maximum(min_image_dimension, min_dimension) target_ratio = tf.to_float(min_target_dimension) / tf.to_float( min_image_dimension) target_height = tf.to_int32(tf.to_float(image_height) * target_ratio) target_width = tf.to_int32(tf.to_float(image_width) * target_ratio) image = tf.image.resize_bilinear( tf.expand_dims(image, axis=0), size=[target_height, target_width], align_corners=True) result = [tf.squeeze(image, axis=0)] if masks is not None: masks = tf.image.resize_nearest_neighbor( tf.expand_dims(masks, axis=3), size=[target_height, target_width], align_corners=True) result.append(tf.squeeze(masks, axis=3)) result.append(tf.stack([target_height, target_width, num_channels])) return result def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None): """Scales boxes from normalized to pixel coordinates. Args: image: A 3D float32 tensor of shape [height, width, channels]. boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding boxes in normalized coordinates. Each row is of the form [ymin, xmin, ymax, xmax]. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. Returns: image: unchanged input image. scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the bounding boxes in pixel coordinates. scaled_keypoints: a 3D float32 tensor with shape [num_instances, num_keypoints, 2] containing the keypoints in pixel coordinates. """ boxlist = box_list.BoxList(boxes) image_height = tf.shape(image)[0] image_width = tf.shape(image)[1] scaled_boxes = box_list_ops.scale(boxlist, image_height, image_width).get() result = [image, scaled_boxes] if keypoints is not None: scaled_keypoints = keypoint_ops.scale(keypoints, image_height, image_width) result.append(scaled_keypoints) return tuple(result) # TODO(alirezafathi): Investigate if instead the function should return None if # masks is None. # pylint: disable=g-doc-return-or-yield def resize_image(image, masks=None, new_height=600, new_width=1024, method=tf.image.ResizeMethod.BILINEAR, align_corners=False): """Resizes images to the given height and width. Args: image: A 3D tensor of shape [height, width, channels] masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. new_height: (optional) (scalar) desired height of the image. new_width: (optional) (scalar) desired width of the image. method: (optional) interpolation method used in resizing. Defaults to BILINEAR. align_corners: bool. If true, exactly align all 4 corners of the input and output. Defaults to False. Returns: Note that the position of the resized_image_shape changes based on whether masks are present. resized_image: A tensor of size [new_height, new_width, channels]. resized_masks: If masks is not None, also outputs masks. A 3D tensor of shape [num_instances, new_height, new_width] resized_image_shape: A 1D tensor of shape [3] containing the shape of the resized image. 
""" with tf.name_scope( 'ResizeImage', values=[image, new_height, new_width, method, align_corners]): new_image = tf.image.resize_images( image, tf.stack([new_height, new_width]), method=method, align_corners=align_corners) image_shape = shape_utils.combined_static_and_dynamic_shape(image) result = [new_image] if masks is not None: num_instances = tf.shape(masks)[0] new_size = tf.stack([new_height, new_width]) def resize_masks_branch(): new_masks = tf.expand_dims(masks, 3) new_masks = tf.image.resize_nearest_neighbor( new_masks, new_size, align_corners=align_corners) new_masks = tf.squeeze(new_masks, axis=3) return new_masks def reshape_masks_branch(): # The shape function will be computed for both branches of the # condition, regardless of which branch is actually taken. Make sure # that we don't trigger an assertion in the shape function when trying # to reshape a non empty tensor into an empty one. new_masks = tf.reshape(masks, [-1, new_size[0], new_size[1]]) return new_masks masks = tf.cond(num_instances > 0, resize_masks_branch, reshape_masks_branch) result.append(masks) result.append(tf.stack([new_height, new_width, image_shape[2]])) return result def subtract_channel_mean(image, means=None): """Normalizes an image by subtracting a mean from each channel. Args: image: A 3D tensor of shape [height, width, channels] means: float list containing a mean for each channel Returns: normalized_images: a tensor of shape [height, width, channels] Raises: ValueError: if images is not a 4D tensor or if the number of means is not equal to the number of channels. """ with tf.name_scope('SubtractChannelMean', values=[image, means]): if len(image.get_shape()) != 3: raise ValueError('Input must be of size [height, width, channels]') if len(means) != image.get_shape()[-1]: raise ValueError('len(means) must match the number of channels') return image - [[means]] def one_hot_encoding(labels, num_classes=None): """One-hot encodes the multiclass labels. Example usage: labels = tf.constant([1, 4], dtype=tf.int32) one_hot = OneHotEncoding(labels, num_classes=5) one_hot.eval() # evaluates to [0, 1, 0, 0, 1] Args: labels: A tensor of shape [None] corresponding to the labels. num_classes: Number of classes in the dataset. Returns: onehot_labels: a tensor of shape [num_classes] corresponding to the one hot encoding of the labels. Raises: ValueError: if num_classes is not specified. """ with tf.name_scope('OneHotEncoding', values=[labels]): if num_classes is None: raise ValueError('num_classes must be specified') labels = tf.one_hot(labels, num_classes, 1, 0) return tf.reduce_max(labels, 0) def rgb_to_gray(image): """Converts a 3 channel RGB image to a 1 channel grayscale image. Args: image: Rank 3 float32 tensor containing 1 image -> [height, width, 3] with pixel values varying between [0, 1]. Returns: image: A single channel grayscale image -> [image, height, 1]. """ return _rgb_to_grayscale(image) def ssd_random_crop(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), aspect_ratio_range=((0.5, 2.0),) * 7, area_range=((0.1, 1.0),) * 7, overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), clip_boxes=(True,) * 7, random_coef=(0.15,) * 7, seed=None, preprocess_vars_cache=None): """Random crop preprocessing with default parameters as in SSD paper. Liu et al., SSD: Single shot multibox detector. For further information on random crop preprocessing refer to RandomCrop function above. 
Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: rank 1 float32 tensor containing the weights. label_confidences: rank 1 float32 tensor containing the confidences. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If label_weights, multiclass_scores, masks, or keypoints is not None, the function also returns: label_weights: rank 1 float32 tensor with shape [num_instances]. multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ def random_crop_selector(selected_result, index): """Applies random_crop_image to selected result. Args: selected_result: A tuple containing image, boxes, labels, keypoints (if not None), and masks (if not None). index: The index that was randomly selected. Returns: A tuple containing image, boxes, labels, keypoints (if not None), and masks (if not None). 
""" i = 3 image, boxes, labels = selected_result[:i] selected_label_weights = None selected_label_confidences = None selected_multiclass_scores = None selected_masks = None selected_keypoints = None if label_weights is not None: selected_label_weights = selected_result[i] i += 1 if label_confidences is not None: selected_label_confidences = selected_result[i] i += 1 if multiclass_scores is not None: selected_multiclass_scores = selected_result[i] i += 1 if masks is not None: selected_masks = selected_result[i] i += 1 if keypoints is not None: selected_keypoints = selected_result[i] return random_crop_image( image=image, boxes=boxes, labels=labels, label_weights=selected_label_weights, label_confidences=selected_label_confidences, multiclass_scores=selected_multiclass_scores, masks=selected_masks, keypoints=selected_keypoints, min_object_covered=min_object_covered[index], aspect_ratio_range=aspect_ratio_range[index], area_range=area_range[index], overlap_thresh=overlap_thresh[index], clip_boxes=clip_boxes[index], random_coef=random_coef[index], seed=seed, preprocess_vars_cache=preprocess_vars_cache) result = _apply_with_random_selector_tuples( tuple( t for t in (image, boxes, labels, label_weights, label_confidences, multiclass_scores, masks, keypoints) if t is not None), random_crop_selector, num_cases=len(min_object_covered), preprocess_vars_cache=preprocess_vars_cache, key=preprocessor_cache.PreprocessorCache.SSD_CROP_SELECTOR_ID) return result def ssd_random_crop_pad(image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, min_object_covered=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0), aspect_ratio_range=((0.5, 2.0),) * 6, area_range=((0.1, 1.0),) * 6, overlap_thresh=(0.1, 0.3, 0.5, 0.7, 0.9, 1.0), clip_boxes=(True,) * 6, random_coef=(0.15,) * 6, min_padded_size_ratio=((1.0, 1.0),) * 6, max_padded_size_ratio=((2.0, 2.0),) * 6, pad_color=(None,) * 6, seed=None, preprocess_vars_cache=None): """Random crop preprocessing with default parameters as in SSD paper. Liu et al., SSD: Single shot multibox detector. For further information on random crop preprocessing refer to RandomCrop function above. Args: image: rank 3 float32 tensor containing 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: float32 tensor of shape [num_instances] representing the confidences for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. 
min_padded_size_ratio: min ratio of padded image height and width to the input image's height and width. max_padded_size_ratio: max ratio of padded image height and width to the input image's height and width. pad_color: padding color. A rank 1 tensor of [3] with dtype=tf.float32. if set as None, it will be set to average color of the randomly cropped image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: Image shape will be [new_height, new_width, channels]. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. new_labels: new labels. new_label_weights: new label weights. """ def random_crop_pad_selector(image_boxes_labels, index): """Random crop preprocessing helper.""" i = 3 image, boxes, labels = image_boxes_labels[:i] selected_label_weights = None selected_label_confidences = None selected_multiclass_scores = None if label_weights is not None: selected_label_weights = image_boxes_labels[i] i += 1 if label_confidences is not None: selected_label_confidences = image_boxes_labels[i] i += 1 if multiclass_scores is not None: selected_multiclass_scores = image_boxes_labels[i] return random_crop_pad_image( image, boxes, labels, label_weights=selected_label_weights, label_confidences=selected_label_confidences, multiclass_scores=selected_multiclass_scores, min_object_covered=min_object_covered[index], aspect_ratio_range=aspect_ratio_range[index], area_range=area_range[index], overlap_thresh=overlap_thresh[index], clip_boxes=clip_boxes[index], random_coef=random_coef[index], min_padded_size_ratio=min_padded_size_ratio[index], max_padded_size_ratio=max_padded_size_ratio[index], pad_color=pad_color[index], seed=seed, preprocess_vars_cache=preprocess_vars_cache) return _apply_with_random_selector_tuples( tuple(t for t in (image, boxes, labels, label_weights, label_confidences, multiclass_scores) if t is not None), random_crop_pad_selector, num_cases=len(min_object_covered), preprocess_vars_cache=preprocess_vars_cache, key=preprocessor_cache.PreprocessorCache.SSD_CROP_PAD_SELECTOR_ID) def ssd_random_crop_fixed_aspect_ratio( image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), aspect_ratio=1.0, area_range=((0.1, 1.0),) * 7, overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), clip_boxes=(True,) * 7, random_coef=(0.15,) * 7, seed=None, preprocess_vars_cache=None): """Random crop preprocessing with default parameters as in SSD paper. Liu et al., SSD: Single shot multibox detector. For further information on random crop preprocessing refer to RandomCrop function above. The only difference is that the aspect ratio of the crops are fixed. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidences for each box. 
multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio: aspect ratio of the cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If mulitclass_scores, masks, or keypoints is not None, the function also returns: multiclass_scores: rank 2 float32 tensor with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ aspect_ratio_range = ((aspect_ratio, aspect_ratio),) * len(area_range) crop_result = ssd_random_crop( image, boxes, labels, label_weights=label_weights, label_confidences=label_confidences, multiclass_scores=multiclass_scores, masks=masks, keypoints=keypoints, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, overlap_thresh=overlap_thresh, clip_boxes=clip_boxes, random_coef=random_coef, seed=seed, preprocess_vars_cache=preprocess_vars_cache) i = 3 new_image, new_boxes, new_labels = crop_result[:i] new_label_weights = None new_label_confidences = None new_multiclass_scores = None new_masks = None new_keypoints = None if label_weights is not None: new_label_weights = crop_result[i] i += 1 if label_confidences is not None: new_label_confidences = crop_result[i] i += 1 if multiclass_scores is not None: new_multiclass_scores = crop_result[i] i += 1 if masks is not None: new_masks = crop_result[i] i += 1 if keypoints is not None: new_keypoints = crop_result[i] result = random_crop_to_aspect_ratio( new_image, new_boxes, new_labels, label_weights=new_label_weights, label_confidences=new_label_confidences, multiclass_scores=new_multiclass_scores, masks=new_masks, keypoints=new_keypoints, aspect_ratio=aspect_ratio, clip_boxes=clip_boxes, seed=seed, preprocess_vars_cache=preprocess_vars_cache) return result def ssd_random_crop_pad_fixed_aspect_ratio( image, boxes, labels, label_weights, label_confidences=None, multiclass_scores=None, masks=None, keypoints=None, min_object_covered=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), aspect_ratio=1.0, aspect_ratio_range=((0.5, 2.0),) * 7, area_range=((0.1, 1.0),) * 7, overlap_thresh=(0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0), clip_boxes=(True,) * 7, random_coef=(0.15,) * 
7, min_padded_size_ratio=(1.0, 1.0), max_padded_size_ratio=(2.0, 2.0), seed=None, preprocess_vars_cache=None): """Random crop and pad preprocessing with default parameters as in SSD paper. Liu et al., SSD: Single shot multibox detector. For further information on random crop preprocessing refer to RandomCrop function above. The only difference is that after the initial crop, images are zero-padded to a fixed aspect ratio instead of being resized to that aspect ratio. Args: image: rank 3 float32 tensor contains 1 image -> [height, width, channels] with pixel values varying between [0, 1]. boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. labels: rank 1 int32 tensor containing the object classes. label_weights: float32 tensor of shape [num_instances] representing the weight for each box. label_confidences: (optional) float32 tensor of shape [num_instances] representing the confidence for each box. multiclass_scores: (optional) float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. masks: (optional) rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. The masks are of the same height, width as the input `image`. keypoints: (optional) rank 3 float32 tensor with shape [num_instances, num_keypoints, 2]. The keypoints are in y-x normalized coordinates. min_object_covered: the cropped image must cover at least this fraction of at least one of the input bounding boxes. aspect_ratio: the final aspect ratio to pad to. aspect_ratio_range: allowed range for aspect ratio of cropped image. area_range: allowed range for area ratio between cropped image and the original image. overlap_thresh: minimum overlap thresh with new cropped image to keep the box. clip_boxes: whether to clip the boxes to the cropped image. random_coef: a random coefficient that defines the chance of getting the original image. If random_coef is 0, we will always get the cropped image, and if it is 1.0, we will always get the original image. min_padded_size_ratio: min ratio of padded image height and width to the input image's height and width. max_padded_size_ratio: max ratio of padded image height and width to the input image's height and width. seed: random seed. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: image: image which is the same rank as input image. boxes: boxes which is the same rank as input boxes. Boxes are in normalized form. labels: new labels. If multiclass_scores, masks, or keypoints is not None, the function also returns: multiclass_scores: rank 2 with shape [num_instances, num_classes] masks: rank 3 float32 tensor with shape [num_instances, height, width] containing instance masks. 
keypoints: rank 3 float32 tensor with shape [num_instances, num_keypoints, 2] """ crop_result = ssd_random_crop( image, boxes, labels, label_weights=label_weights, label_confidences=label_confidences, multiclass_scores=multiclass_scores, masks=masks, keypoints=keypoints, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, overlap_thresh=overlap_thresh, clip_boxes=clip_boxes, random_coef=random_coef, seed=seed, preprocess_vars_cache=preprocess_vars_cache) i = 3 new_image, new_boxes, new_labels = crop_result[:i] new_label_weights = None new_label_confidences = None new_multiclass_scores = None new_masks = None new_keypoints = None if label_weights is not None: new_label_weights = crop_result[i] i += 1 if label_confidences is not None: new_label_confidences = crop_result[i] i += 1 if multiclass_scores is not None: new_multiclass_scores = crop_result[i] i += 1 if masks is not None: new_masks = crop_result[i] i += 1 if keypoints is not None: new_keypoints = crop_result[i] result = random_pad_to_aspect_ratio( new_image, new_boxes, masks=new_masks, keypoints=new_keypoints, aspect_ratio=aspect_ratio, min_padded_size_ratio=min_padded_size_ratio, max_padded_size_ratio=max_padded_size_ratio, seed=seed, preprocess_vars_cache=preprocess_vars_cache) result = list(result) i = 3 result.insert(2, new_labels) if new_label_weights is not None: result.insert(i, new_label_weights) i += 1 if new_label_confidences is not None: result.insert(i, new_label_confidences) i += 1 if multiclass_scores is not None: result.insert(i, new_multiclass_scores) result = tuple(result) return result def convert_class_logits_to_softmax(multiclass_scores, temperature=1.0): """Converts multiclass logits to softmax scores after applying temperature. Args: multiclass_scores: float32 tensor of shape [num_instances, num_classes] representing the score for each box for each class. temperature: Scale factor to use prior to applying softmax. Larger temperatures give more uniform distruibutions after softmax. Returns: multiclass_scores: float32 tensor of shape [num_instances, num_classes] with scaling and softmax applied. """ # Multiclass scores must be stored as logits. Apply temp and softmax. multiclass_scores_scaled = tf.divide( multiclass_scores, temperature, name='scale_logits') multiclass_scores = tf.nn.softmax(multiclass_scores_scaled, name='softmax') return multiclass_scores def get_default_func_arg_map(include_label_weights=True, include_label_confidences=False, include_multiclass_scores=False, include_instance_masks=False, include_keypoints=False): """Returns the default mapping from a preprocessor function to its args. Args: include_label_weights: If True, preprocessing functions will modify the label weights, too. include_label_confidences: If True, preprocessing functions will modify the label confidences, too. include_multiclass_scores: If True, preprocessing functions will modify the multiclass scores, too. include_instance_masks: If True, preprocessing functions will modify the instance masks, too. include_keypoints: If True, preprocessing functions will modify the keypoints, too. Returns: A map from preprocessing functions to the arguments they receive. 
""" groundtruth_label_weights = None if include_label_weights: groundtruth_label_weights = ( fields.InputDataFields.groundtruth_weights) groundtruth_label_confidences = None if include_label_confidences: groundtruth_label_confidences = ( fields.InputDataFields.groundtruth_confidences) multiclass_scores = None if include_multiclass_scores: multiclass_scores = (fields.InputDataFields.multiclass_scores) groundtruth_instance_masks = None if include_instance_masks: groundtruth_instance_masks = ( fields.InputDataFields.groundtruth_instance_masks) groundtruth_keypoints = None if include_keypoints: groundtruth_keypoints = fields.InputDataFields.groundtruth_keypoints prep_func_arg_map = { normalize_image: (fields.InputDataFields.image,), random_horizontal_flip: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_vertical_flip: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_rotation90: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_pixel_value_scale: (fields.InputDataFields.image,), random_image_scale: ( fields.InputDataFields.image, groundtruth_instance_masks, ), random_rgb_to_gray: (fields.InputDataFields.image,), random_adjust_brightness: (fields.InputDataFields.image,), random_adjust_contrast: (fields.InputDataFields.image,), random_adjust_hue: (fields.InputDataFields.image,), random_adjust_saturation: (fields.InputDataFields.image,), random_distort_color: (fields.InputDataFields.image,), random_jitter_boxes: (fields.InputDataFields.groundtruth_boxes,), random_crop_image: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints), random_pad_image: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes), random_crop_pad_image: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores), random_crop_to_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, ), random_pad_to_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_instance_masks, groundtruth_keypoints, ), random_black_patches: (fields.InputDataFields.image,), retain_boxes_above_threshold: ( fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, ), image_to_float: (fields.InputDataFields.image,), random_resize_method: (fields.InputDataFields.image,), resize_to_range: ( fields.InputDataFields.image, groundtruth_instance_masks, ), resize_to_min_dimension: ( fields.InputDataFields.image, groundtruth_instance_masks, ), scale_boxes_to_pixel_coordinates: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, groundtruth_keypoints, ), resize_image: ( fields.InputDataFields.image, groundtruth_instance_masks, ), subtract_channel_mean: 
(fields.InputDataFields.image,), one_hot_encoding: (fields.InputDataFields.groundtruth_image_classes,), rgb_to_gray: (fields.InputDataFields.image,), ssd_random_crop: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints), ssd_random_crop_pad: (fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores), ssd_random_crop_fixed_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints), ssd_random_crop_pad_fixed_aspect_ratio: ( fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_classes, groundtruth_label_weights, groundtruth_label_confidences, multiclass_scores, groundtruth_instance_masks, groundtruth_keypoints, ), convert_class_logits_to_softmax: (multiclass_scores,), } return prep_func_arg_map def preprocess(tensor_dict, preprocess_options, func_arg_map=None, preprocess_vars_cache=None): """Preprocess images and bounding boxes. Various types of preprocessing (to be implemented) based on the preprocess_options dictionary e.g. "crop image" (affects image and possibly boxes), "white balance image" (affects only image), etc. If self._options is None, no preprocessing is done. Args: tensor_dict: dictionary that contains images, boxes, and can contain other things as well. images-> rank 4 float32 tensor contains 1 image -> [1, height, width, 3]. with pixel values varying between [0, 1] boxes-> rank 2 float32 tensor containing the bounding boxes -> [N, 4]. Boxes are in normalized form meaning their coordinates vary between [0, 1]. Each row is in the form of [ymin, xmin, ymax, xmax]. preprocess_options: It is a list of tuples, where each tuple contains a function and a dictionary that contains arguments and their values. func_arg_map: mapping from preprocessing functions to arguments that they expect to receive and return. preprocess_vars_cache: PreprocessorCache object that records previously performed augmentations. Updated in-place. If this function is called multiple times with the same non-null cache, it will perform deterministically. Returns: tensor_dict: which contains the preprocessed images, bounding boxes, etc. Raises: ValueError: (a) If the functions passed to Preprocess are not in func_arg_map. (b) If the arguments that a function needs do not exist in tensor_dict. 
(c) If image in tensor_dict is not rank 4 """ if func_arg_map is None: func_arg_map = get_default_func_arg_map() # changes the images to image (rank 4 to rank 3) since the functions # receive rank 3 tensor for image if fields.InputDataFields.image in tensor_dict: images = tensor_dict[fields.InputDataFields.image] if len(images.get_shape()) != 4: raise ValueError('images in tensor_dict should be rank 4') image = tf.squeeze(images, axis=0) tensor_dict[fields.InputDataFields.image] = image # Preprocess inputs based on preprocess_options for option in preprocess_options: func, params = option if func not in func_arg_map: raise ValueError('The function %s does not exist in func_arg_map' % (func.__name__)) arg_names = func_arg_map[func] for a in arg_names: if a is not None and a not in tensor_dict: raise ValueError('The function %s requires argument %s' % (func.__name__, a)) def get_arg(key): return tensor_dict[key] if key is not None else None args = [get_arg(a) for a in arg_names] if (preprocess_vars_cache is not None and 'preprocess_vars_cache' in inspect.getargspec(func).args): params['preprocess_vars_cache'] = preprocess_vars_cache results = func(*args, **params) if not isinstance(results, (list, tuple)): results = (results,) # Removes None args since the return values will not contain those. arg_names = [arg_name for arg_name in arg_names if arg_name is not None] for res, arg_name in zip(results, arg_names): tensor_dict[arg_name] = res # changes the image to images (rank 3 to rank 4) to be compatible to what # we received in the first place if fields.InputDataFields.image in tensor_dict: image = tensor_dict[fields.InputDataFields.image] images = tf.expand_dims(image, 0) tensor_dict[fields.InputDataFields.image] = images return tensor_dict
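The preprocess() entry point above ties the whole module together; here is a minimal usage sketch (not part of the library), assuming the standard object_detection package layout is importable and using placeholder tensor shapes and values.

import tensorflow as tf
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields

# Rank-4 image batch of size 1, as required by preprocess(), plus one dummy box.
tensor_dict = {
    fields.InputDataFields.image:
        tf.random_uniform([1, 480, 640, 3], minval=0.0, maxval=1.0),
    fields.InputDataFields.groundtruth_boxes:
        tf.constant([[0.1, 0.1, 0.6, 0.5]], dtype=tf.float32),
    fields.InputDataFields.groundtruth_classes:
        tf.constant([1], dtype=tf.int32),
}
# Each option is a (function, kwargs) pair resolved through the default func_arg_map.
preprocess_options = [
    (preprocessor.random_horizontal_flip, {}),
    (preprocessor.resize_to_range, {'min_dimension': 600, 'max_dimension': 1024}),
]
augmented_dict = preprocessor.preprocess(tensor_dict, preprocess_options)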
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/scripts/docker
docker
triton_inference_server
#!/usr/bin/env bash # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Install Docker . /etc/os-release && \ curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - && \ echo "deb [arch=amd64] https://download.docker.com/linux/debian buster stable" > /etc/apt/sources.list.d/docker.list && \ curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey| apt-key add - && \ curl -s -L https://nvidia.github.io/nvidia-docker/$ID$VERSION_ID/nvidia-docker.list > /etc/apt/sources.list.d/nvidia-docker.list && \ apt-get update && \ apt-get install -y docker-ce docker-ce-cli containerd.io nvidia-docker2 WORKDIR="${WORKDIR:=$(pwd)}" export DATASETS_DIR=${WORKDIR}/datasets export WORKSPACE_DIR=${WORKDIR}/runner_workspace export CHECKPOINTS_DIR=${WORKSPACE_DIR}/checkpoints export MODEL_REPOSITORY_PATH=${WORKSPACE_DIR}/model_store export SHARED_DIR=${WORKSPACE_DIR}/shared_dir NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:=all} docker run --rm -d \ -p 8000:8000 \ -p 8001:8001 \ -p 8002:8002 \ --runtime=nvidia \ -e NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES} \ -e ORT_TENSORRT_FP16_ENABLE=1 \ -v ${MODEL_REPOSITORY_PATH}:${MODEL_REPOSITORY_PATH} \ --shm-size=1g \ --ulimit memlock=-1 \ --ulimit stack=67108864 \ --ipc=host \ nvcr.io/nvidia/tritonserver:21.12-py3 tritonserver \ --model-store=${MODEL_REPOSITORY_PATH} \ --strict-model-config=false \ --exit-on-error=true \ --model-control-mode=explicit
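Because the server above runs with --model-control-mode=explicit, models are not loaded automatically. A hedged sketch of polling readiness and loading a model over Triton's standard v2 HTTP API follows; the model name "tft" is only a placeholder.

import requests

# Readiness of the Triton container started above (HTTP port 8000).
ready = requests.get("http://localhost:8000/v2/health/ready")
print("server ready:", ready.status_code == 200)

# Explicit model-control mode: ask the server to load a model from the model store.
requests.post("http://localhost:8000/v2/repository/models/tft/load")

# List what the model repository currently knows about.
print(requests.post("http://localhost:8000/v2/repository/index").json())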
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/layers
layers
roi_align
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import torch from torch import nn from torch.autograd import Function from torch.autograd.function import once_differentiable from torch.nn.modules.utils import _pair from maskrcnn_benchmark import _C class _ROIAlign(Function): @staticmethod def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio, is_nhwc): ctx.save_for_backward(roi) ctx.output_size = _pair(output_size) ctx.spatial_scale = spatial_scale ctx.sampling_ratio = sampling_ratio ctx.input_shape = input.size() ctx.is_nhwc = is_nhwc output = _C.roi_align_forward( input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio, is_nhwc ) return output @staticmethod @once_differentiable def backward(ctx, grad_output): rois, = ctx.saved_tensors output_size = ctx.output_size spatial_scale = ctx.spatial_scale sampling_ratio = ctx.sampling_ratio bs, ch, h, w = ctx.input_shape grad_input = _C.roi_align_backward( grad_output, rois, spatial_scale, output_size[0], output_size[1], bs, ch, h, w, sampling_ratio, ctx.is_nhwc ) return grad_input, None, None, None, None, None roi_align = _ROIAlign.apply class ROIAlign(nn.Module): def __init__(self, output_size, spatial_scale, sampling_ratio, is_nhwc): super(ROIAlign, self).__init__() self.output_size = output_size self.spatial_scale = spatial_scale self.sampling_ratio = sampling_ratio self.nhwc = is_nhwc @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32) def forward(self, input, rois): return roi_align( input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.nhwc ) def __repr__(self): tmpstr = self.__class__.__name__ + "(" tmpstr += "output_size=" + str(self.output_size) tmpstr += ", spatial_scale=" + str(self.spatial_scale) tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) tmpstr += ", is_nhwc=" + str(self.nhwc) tmpstr += ")" return tmpstr
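A hedged usage sketch for the wrapper above (requires the compiled maskrcnn_benchmark._C CUDA extension and a GPU; the feature map, scale, and RoI values are illustrative only):

import torch
from maskrcnn_benchmark.layers.roi_align import ROIAlign

# Pool 7x7 features from a stride-16 feature map, NCHW layout.
pooler = ROIAlign(output_size=(7, 7), spatial_scale=1.0 / 16,
                  sampling_ratio=2, is_nhwc=False)
features = torch.randn(1, 256, 50, 50, device="cuda")
# Each RoI is (batch_index, x1, y1, x2, y2) in input-image coordinates.
rois = torch.tensor([[0.0, 32.0, 32.0, 320.0, 320.0]], device="cuda")
pooled = pooler(features, rois)  # -> [num_rois, 256, 7, 7]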
PyTorch/LanguageModeling/BERT/triton/runner
runner
config
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from typing import Dict, List, Optional, Union import yaml if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .configuration import Configuration from .core import DataObject from .triton import Triton class Checkpoint(DataObject): """ Checkpoint data placeholder """ name: str url: str def __init__(self, name: str, url: str): self.name = name self.url = url class Dataset(DataObject): """ Dataset data placeholder """ name: str def __init__(self, name: str): self.name = name class Config(DataObject): """ Configuration object for runner experiments """ def __init__( self, model_name: str, framework: str, container_version: str, configurations: List[Configuration], datasets_dir: str = "datasets", datasets: List[Dataset] = None, checkpoints: List[Checkpoint] = None, triton_dockerfile: Optional[str] = None, triton_container_image: Optional[str] = None, triton_custom_operations: Optional[str] = None, triton_load_model_method: Optional[str] = Triton.LOAD_MODE.EXPLICIT, ): """ Args: model_name: Name of model framework: Framework used to create model container_version: Version of Triton Inference Server container used for evaluation configurations: List of experiments configurations datasets_dir: Directory where datasets are stored datasets: Datasets used for conversion/export checkpoints: Checkpoints with trained model triton_load_model_method: Triton Inference Server model loading mode triton_dockerfile: Dockerfile for Triton to build custom image triton_container_image: Custom image used for Triton Server - leave empty to use default or built from Dockerfile triton_custom_operations: Path where custom operation library is stored """ self.model_name = model_name self.framework = framework self.container_version = container_version self.configurations = configurations self.datasets_dir = datasets_dir self.datasets = datasets self.checkpoints = checkpoints self.triton_load_model_method = triton_load_model_method self.triton_dockerfile = triton_dockerfile self.triton_container_image = triton_container_image self.triton_custom_operations = triton_custom_operations def to_file(self, file_path: Union[pathlib.Path, str]) -> None: """ Save config data to file Args: file_path: path to file where config data is should be stored Returns: None """ data = self.to_dict() with open(file_path, "w") as f: yaml.safe_dump(data, f) @staticmethod def from_dict(config_data: Dict): """ Create configuration object from data stored in dictionary Args: config_data: dictionary with config data Returns: Config object """ configurations = [] for configuration_data in config_data["configurations"]: configuration = Configuration(**configuration_data) configurations.append(configuration) checkpoints = [] for checkpoint_data in config_data.get("checkpoints", []): checkpoint = Checkpoint( name=checkpoint_data["name"], url=checkpoint_data["url"], ) 
checkpoints.append(checkpoint) datasets = [] for dataset_data in config_data.get("datasets", []): dataset = Dataset(name=dataset_data["name"]) datasets.append(dataset) return Config( model_name=config_data["model_name"], framework=config_data["framework"], container_version=config_data["container_version"], configurations=configurations, checkpoints=checkpoints, datasets=datasets, datasets_dir=config_data.get("datasets_dir"), triton_load_model_method=config_data["triton_load_model_method"], triton_dockerfile=config_data.get("triton_dockerfile"), triton_container_image=config_data.get("triton_container_image"), triton_custom_operations=config_data.get("triton_custom_operations"), ) @staticmethod def from_file(file_path: Union[pathlib.Path, str]): """ Load config data from a YAML file Args: file_path: path to file where config data is stored Returns: Config object """ with open(file_path, "r") as f: config_data = yaml.safe_load(f) return Config.from_dict(config_data)
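A hedged sketch of the YAML round trip implemented above; the file names and the import path are assumptions, and the YAML must already follow the schema consumed by Config.from_dict:

from triton.runner.config import Config  # import path assumed

config = Config.from_file("runner_workspace/config.yaml")  # file name is a placeholder
print(config.model_name, config.framework, len(config.configurations))
config.to_file("runner_workspace/config_copy.yaml")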
PyTorch/Segmentation/MaskRCNN/pytorch/configs/quick_schedules
quick_schedules
rpn_R_50_FPN_quick
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" RPN_ONLY: True BACKBONE: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 RPN: USE_FPN: True ANCHOR_STRIDE: (4, 8, 16, 32, 64) PRE_NMS_TOP_N_TEST: 1000 POST_NMS_TOP_N_TEST: 2000 FPN_POST_NMS_TOP_N_TEST: 2000 DATASETS: TRAIN: ("coco_2014_minival",) TEST: ("coco_2014_minival",) INPUT: MIN_SIZE_TRAIN: 600 MAX_SIZE_TRAIN: 1000 MIN_SIZE_TEST: 800 MAX_SIZE_TEST: 1000 DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: BASE_LR: 0.005 WEIGHT_DECAY: 0.0001 STEPS: (1500,) MAX_ITER: 2000 IMS_PER_BATCH: 4 TEST: IMS_PER_BATCH: 2
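A hedged sketch of how a quick-schedule config like the one above is typically consumed through the repository's yacs-based cfg object; the relative path and the override are illustrative:

from maskrcnn_benchmark.config import cfg

cfg.merge_from_file("configs/quick_schedules/rpn_R_50_FPN_quick.yaml")
cfg.merge_from_list(["SOLVER.IMS_PER_BATCH", 2])  # command-line style override
cfg.freeze()
print(cfg.MODEL.RPN_ONLY, cfg.SOLVER.MAX_ITER)  # True, 2000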
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
embedded_ssd_mobilenet_v1_coco
# Embedded SSD with Mobilenet v1 configuration for MSCOCO Dataset. # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. model { ssd { num_classes: 90 box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { num_layers: 5 min_scale: 0.2 max_scale: 0.95 aspect_ratios: 1.0 aspect_ratios: 2.0 aspect_ratios: 0.5 aspect_ratios: 3.0 aspect_ratios: 0.3333 } } image_resizer { fixed_shape_resizer { height: 256 width: 256 } } box_predictor { convolutional_box_predictor { min_depth: 0 max_depth: 0 num_layers_before_predictor: 0 use_dropout: false dropout_keep_probability: 0.8 kernel_size: 1 box_code_size: 4 apply_sigmoid_to_scores: false conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.9997, epsilon: 0.001, } } } } feature_extractor { type: 'embedded_ssd_mobilenet_v1' min_depth: 16 depth_multiplier: 0.125 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.9997, epsilon: 0.001, } } } loss { classification_loss { weighted_sigmoid { } } localization_loss { weighted_smooth_l1 { } } hard_example_miner { num_hard_examples: 3000 iou_threshold: 0.99 loss_type: CLASSIFICATION max_negatives_per_positive: 3 min_negatives_per_image: 0 } classification_weight: 1.0 localization_weight: 1.0 } normalize_loss_by_num_matches: true post_processing { batch_non_max_suppression { score_threshold: 1e-8 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 100 } score_converter: SIGMOID } } } train_config: { batch_size: 32 optimizer { rms_prop_optimizer: { learning_rate: { exponential_decay_learning_rate { initial_learning_rate: 0.004 decay_steps: 800720 decay_factor: 0.95 } } momentum_optimizer_value: 0.9 decay: 0.9 epsilon: 1.0 } } fine_tune_checkpoint: "/PATH_TO_BE_CONFIGURED/model.ckpt" data_augmentation_options { random_horizontal_flip { } } data_augmentation_options { ssd_random_crop { } } } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" } eval_config: { num_examples: 8000 use_moving_averages: true } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" shuffle: false num_readers: 1 }
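A hedged sketch of reading the pipeline config above with the object_detection config utilities; the file path is an assumption:

from object_detection.utils import config_util

configs = config_util.get_configs_from_pipeline_file(
    "samples/configs/embedded_ssd_mobilenet_v1_coco.config")
print(configs["model"].ssd.num_classes)     # 90
print(configs["train_config"].batch_size)   # 32
print(configs["eval_config"].num_examples)  # 8000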
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/bermuda
bermuda
onnx2trt_conv
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import Dict, Iterable, Optional # pytype: disable=import-error import onnx import tensorrt as trt from ..core import BaseConverter, Format, Model, Precision, ShapeSpec from ..extensions import converters from .utils import get_input_shapes # pytype: enable=import-error LOGGER = logging.getLogger(__name__) TRT_LOGGER = trt.Logger(trt.Logger.INFO) class Onnx2TRTConverter(BaseConverter): def __init__(self, *, max_batch_size: int, max_workspace_size: int, precision: str): self._max_batch_size = max_batch_size self._max_workspace_size = max_workspace_size self._precision = Precision(precision) def convert(self, model: Model, dataloader_fn) -> Model: input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size) cuda_engine = onnx2trt( model.handle, shapes=input_shapes, max_workspace_size=self._max_workspace_size, max_batch_size=self._max_batch_size, model_precision=self._precision.value, ) return model._replace(handle=cuda_engine) @staticmethod def required_source_model_precision(requested_model_precision: Precision) -> Precision: # TensorRT requires source models to be in FP32 precision return Precision.FP32 def onnx2trt( onnx_model: onnx.ModelProto, *, shapes: Dict[str, ShapeSpec], max_workspace_size: int, max_batch_size: int, model_precision: str, ) -> "trt.ICudaEngine": """ Converts onnx model to TensorRT ICudaEngine Args: onnx_model: onnx.Model to convert shapes: dictionary containing min shape, max shape, opt shape for each input name max_workspace_size: The maximum GPU temporary memory which the CudaEngine can use at execution time. max_batch_size: The maximum batch size which can be used at execution time, and also the batch size for which the CudaEngine will be optimized. model_precision: precision of kernels (possible values: fp16, fp32) Returns: TensorRT ICudaEngine """ # Whether or not 16-bit kernels are permitted. # During :class:`ICudaEngine` build fp16 kernels will also be tried when this mode is enabled. fp16_mode = "16" in model_precision builder = trt.Builder(TRT_LOGGER) builder.fp16_mode = fp16_mode builder.max_batch_size = max_batch_size builder.max_workspace_size = max_workspace_size # In TensorRT 7.0, the ONNX parser only supports full-dimensions mode, # meaning that your network definition must be created with the explicitBatch flag set. 
# For more information, see # https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#work_dynamic_shapes flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) network = builder.create_network(flags) with trt.OnnxParser(network, TRT_LOGGER) as parser: # onnx model parsing if not parser.parse(onnx_model.SerializeToString()): for i in range(parser.num_errors): LOGGER.error(f"OnnxParser error {i}/{parser.num_errors}: {parser.get_error(i)}") raise RuntimeError("Error during parsing ONNX model (see logs for details)") # optimization config = builder.create_builder_config() config.flags |= bool(fp16_mode) << int(trt.BuilderFlag.FP16) config.max_workspace_size = max_workspace_size profile = builder.create_optimization_profile() for name, spec in shapes.items(): profile.set_shape(name, **spec._asdict()) config.add_optimization_profile(profile) engine = builder.build_engine(network, config=config) return engine converters.register_extension(f"{Format.ONNX.value}--{Format.TRT.value}", Onnx2TRTConverter)
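A hedged sketch of calling onnx2trt() directly, outside the converter extension; the ONNX file name, input name, and shapes are placeholders, and ShapeSpec is the (min, opt, max) named tuple from the toolkit's core module (import path assumed):

import onnx
from deployment_toolkit.core import ShapeSpec  # import path assumed

onnx_model = onnx.load("model.onnx")
shapes = {
    "INPUT__0": ShapeSpec(min=(1, 4, 64, 64, 64),
                          opt=(2, 4, 64, 64, 64),
                          max=(2, 4, 64, 64, 64)),
}
engine = onnx2trt(
    onnx_model,
    shapes=shapes,
    max_workspace_size=4 * 2**30,  # 4 GiB of builder workspace
    max_batch_size=2,
    model_precision="fp16",
)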
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/cli/commands
commands
base_command
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class BaseCommand(object): def init_parser(self, base_parser): raise NotImplementedError() def run(self, args): raise NotImplementedError()
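A hedged sketch of implementing the interface above. The argparse wiring assumes init_parser receives an argparse sub-parsers object (an assumption about how the syngen CLI registers commands), and the command itself is purely illustrative:

import argparse

class HelloCommand(BaseCommand):
    def init_parser(self, base_parser):
        parser = base_parser.add_parser("hello", help="print a greeting")
        parser.add_argument("--name", default="world")
        parser.set_defaults(action=self.run)

    def run(self, args):
        print(f"hello, {args.name}")

# Illustrative wiring:
root = argparse.ArgumentParser()
subparsers = root.add_subparsers()
HelloCommand().init_parser(subparsers)
args = root.parse_args(["hello", "--name", "syngen"])
args.action(args)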
PyTorch/LanguageModeling/BERT/scripts
scripts
run_glue
#!/bin/bash # Copyright (c) 2019-2020 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e echo "Container nvidia build = " $NVIDIA_BUILD_ID init_checkpoint=${1:-"/workspace/bert/checkpoints/bert_uncased.pt"} data_dir=${2:-"$BERT_PREP_WORKING_DIR/download/glue/MRPC/"} vocab_file=${3:-"$BERT_PREP_WORKING_DIR/download/google_pretrained_weights/uncased_L-24_H-1024_A-16/vocab.txt"} config_file=${4:-"/workspace/bert/bert_configs/large.json"} out_dir=${5:-"/workspace/bert/results/MRPC"} task_name=${6:-"mrpc"} num_gpu=${7:-"8"} batch_size=${8:-"16"} gradient_accumulation_steps=${9:-"1"} learning_rate=${10:-"2.4e-5"} warmup_proportion=${11:-"0.1"} epochs=${12:-"3"} max_steps=${13:-"-1.0"} precision=${14:-"fp16"} seed=${15:-"2"} mode=${16:-"train eval"} mkdir -p $out_dir if [ "$mode" = "eval" ] ; then num_gpu=1 fi use_fp16="" if [ "$precision" = "fp16" ] ; then echo "fp16 activated!" use_fp16="--fp16" fi if [ "$num_gpu" = "1" ] ; then export CUDA_VISIBLE_DEVICES=0 mpi_command="" else unset CUDA_VISIBLE_DEVICES mpi_command=" -m torch.distributed.launch --nproc_per_node=$num_gpu" fi CMD="python $mpi_command run_glue.py " CMD+="--task_name ${task_name} " if [[ $mode == *"train"* ]] ; then CMD+="--do_train " CMD+="--train_batch_size=$batch_size " fi if [[ $mode == *"eval"* ]] || [[ $mode == *"prediction"* ]]; then if [[ $mode == *"eval"* ]] ; then CMD+="--do_eval " fi if [[ $mode == *"prediction"* ]] ; then CMD+="--do_predict " fi CMD+="--eval_batch_size=$batch_size " fi CMD+="--gradient_accumulation_steps=$gradient_accumulation_steps " CMD+="--do_lower_case " CMD+="--data_dir $data_dir " CMD+="--bert_model bert-large-uncased " CMD+="--seed $seed " CMD+="--init_checkpoint $init_checkpoint " CMD+="--warmup_proportion $warmup_proportion " CMD+="--max_seq_length 128 " CMD+="--learning_rate $learning_rate " CMD+="--num_train_epochs $epochs " CMD+="--max_steps $max_steps " CMD+="--vocab_file=$vocab_file " CMD+="--config_file=$config_file " CMD+="--output_dir $out_dir " CMD+="$use_fp16" LOGFILE=$out_dir/logfile $CMD |& tee $LOGFILE
TensorFlow/Detection/SSD/models/research/slim/nets/nasnet
nasnet
README
# TensorFlow-Slim NASNet-A Implementation/Checkpoints This directory contains the code for the NASNet-A model from the paper [Learning Transferable Architectures for Scalable Image Recognition](https://arxiv.org/abs/1707.07012) by Zoph et al. In nasnet.py there are three different configurations of NASNet-A that are implemented. One of the models is the NASNet-A built for CIFAR-10 and the other two are variants of NASNet-A trained on ImageNet, which are listed below. # Pre-Trained Models Two NASNet-A checkpoints are available that have been trained on the [ILSVRC-2012-CLS](http://www.image-net.org/challenges/LSVRC/2012/) image classification dataset. Accuracies were computed by evaluating using a single image crop. Model Checkpoint | Million MACs | Million Parameters | Top-1 Accuracy | Top-5 Accuracy | :----:|:------------:|:----------:|:-------:|:-------:| [NASNet-A_Mobile_224](https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_mobile_04_10_2017.tar.gz)|564|5.3|74.0|91.6| [NASNet-A_Large_331](https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_large_04_10_2017.tar.gz)|23800|88.9|82.7|96.2| Here is an example of how to download the NASNet-A_Mobile_224 checkpoint. The way to download the NASNet-A_Large_331 is the same. ```shell CHECKPOINT_DIR=/tmp/checkpoints mkdir ${CHECKPOINT_DIR} cd ${CHECKPOINT_DIR} wget https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_mobile_04_10_2017.tar.gz tar -xvf nasnet-a_mobile_04_10_2017.tar.gz rm nasnet-a_mobile_04_10_2017.tar.gz ``` More information on integrating NASNet Models into your project can be found at the [TF-Slim Image Classification Library](https://github.com/tensorflow/models/blob/master/research/slim/README.md). To get started running models on-device go to [TensorFlow Mobile](https://www.tensorflow.org/mobile/). ## Sample Commands for using NASNet-A Mobile and Large Checkpoints for Inference ------- Run eval with the NASNet-A mobile ImageNet model ```shell DATASET_DIR=/tmp/imagenet EVAL_DIR=/tmp/tfmodel/eval CHECKPOINT_DIR=/tmp/checkpoints/model.ckpt python tensorflow_models/research/slim/eval_image_classifier \ --checkpoint_path=${CHECKPOINT_DIR} \ --eval_dir=${EVAL_DIR} \ --dataset_dir=${DATASET_DIR} \ --dataset_name=imagenet \ --dataset_split_name=validation \ --model_name=nasnet_mobile \ --eval_image_size=224 ``` Run eval with the NASNet-A large ImageNet model ```shell DATASET_DIR=/tmp/imagenet EVAL_DIR=/tmp/tfmodel/eval CHECKPOINT_DIR=/tmp/checkpoints/model.ckpt python tensorflow_models/research/slim/eval_image_classifier \ --checkpoint_path=${CHECKPOINT_DIR} \ --eval_dir=${EVAL_DIR} \ --dataset_dir=${DATASET_DIR} \ --dataset_name=imagenet \ --dataset_split_name=validation \ --model_name=nasnet_large \ --eval_image_size=331 ```
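The checkpoints above can also be restored directly into the TF-Slim graph defined in nasnet.py; a minimal sketch (TF1.x, paths reused from the download example above, 1001 ImageNet classes assumed):

```python
import tensorflow as tf
from nets.nasnet import nasnet

slim = tf.contrib.slim
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
    logits, end_points = nasnet.build_nasnet_mobile(
        images, num_classes=1001, is_training=False)
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, '/tmp/checkpoints/model.ckpt')
```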
TensorFlow2/Segmentation/Contrib/UNet3P/data_preparation
data_preparation
extract_data
# extract testing data
unzip data/Training_Batch1.zip -d data/
mv "data/media/nas/01_Datasets/CT/LITS/Training Batch 1/" "data/Training Batch 1/"
rm -r data/media

# extract training data
unzip data/Training_Batch2.zip -d data/
mv "data/media/nas/01_Datasets/CT/LITS/Training Batch 2/" "data/Training Batch 2/"
rm -r data/media
Tools/DGLPyTorch/SyntheticGraphGeneration/scripts
scripts
time_filter_tabformer
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
from pathlib import Path

import pandas as pd

if __name__ == '__main__':
    tabformer_path = sys.argv[1]
    save_path = Path(tabformer_path).parent
    save_path = save_path / 'card_transaction.v2.csv'

    df = pd.read_csv(tabformer_path)

    # - create a seconds column to sort transactions by
    t = df["Time"].str.split(":", expand=True)
    t = t[0].apply(int) * 3600 + t[1].apply(int) * 60
    df.loc[:, "Seconds"] = t
    df['Card ID'] = df["User"].astype(str) + df["Card"].astype(str)
    sorted_df = df.sort_values(by="Seconds")

    # - keep only the last (most recent) transaction per (card, merchant) pair
    tdf = sorted_df.groupby(by=["Card ID", "Merchant Name"], axis=0).tail(1).reset_index(drop=True)
    tdf = tdf.drop(columns=["Card ID", "Seconds"])

    # - save data
    tdf.to_csv(save_path, index=False)
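As a quick illustration of the filtering rule above (keep only the most recent transaction per card/merchant pair), here is a toy sketch on a hand-made frame; the column names mirror the TabFormer CSV, but the rows are made up.

```python
import pandas as pd

toy = pd.DataFrame({
    "User": [0, 0, 1],
    "Card": [0, 0, 2],
    "Merchant Name": ["A", "A", "B"],
    "Time": ["06:21", "13:45", "09:05"],
})
t = toy["Time"].str.split(":", expand=True)
toy["Seconds"] = t[0].astype(int) * 3600 + t[1].astype(int) * 60
toy["Card ID"] = toy["User"].astype(str) + toy["Card"].astype(str)

latest = (toy.sort_values("Seconds")
             .groupby(["Card ID", "Merchant Name"])
             .tail(1)
             .drop(columns=["Card ID", "Seconds"]))
# keeps the 13:45 row for card "00" / merchant A and the only row for card "12" / merchant B
print(latest)
```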
TensorFlow/Classification/ConvNets/triton/deployment_toolkit/library
library
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
PyTorch/DrugDiscovery/MoFlow/moflow/runtime
runtime
generate
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple import numpy as np from torch.cuda.amp import autocast import torch from moflow.config import CONFIGS, Config from moflow.model.model import MoFlow from moflow.utils import convert_predictions_to_mols, postprocess_predictions from moflow.runtime.arguments import PARSER from moflow.runtime.common import get_newest_checkpoint, load_state from moflow.runtime.distributed_utils import get_device from moflow.runtime.logger import PerformanceLogger, setup_logging def infer(model: MoFlow, config: Config, device: torch.device, *, ln_var: float = 0, temp: float = 0.6, mu: Optional[torch.Tensor] = None, batch_size: int = 20) -> Tuple[np.ndarray, np.ndarray]: if mu is None: mu = torch.zeros(config.z_dim, dtype=torch.float32, device=device) sigma = temp * np.sqrt(np.exp(ln_var)) with torch.no_grad(): z = torch.normal(mu.reshape(-1, config.z_dim).repeat((batch_size, 1)), sigma) adj, x = model.reverse(z) x, adj = postprocess_predictions(x, adj, config=config) return adj, x if __name__ == '__main__': from rdkit import RDLogger RDLogger.DisableLog('rdApp.*') args = PARSER.parse_args() logger = setup_logging(args) perf_logger = PerformanceLogger(logger, args.batch_size, args.warmup_steps, mode='generate') if args.predictions_path: from rdkit.Chem import SmilesWriter smiles_writer = SmilesWriter(args.predictions_path) snapshot_path = get_newest_checkpoint(args.results_dir) config = CONFIGS[args.config_name] model = MoFlow(config) device = get_device(args.local_rank) if snapshot_path is not None: epoch, ln_var = load_state(snapshot_path, model, device=device) elif args.allow_untrained: epoch, ln_var = 0, 0 else: raise RuntimeError('Generating molecules from an untrained network! ' 'If this was intentional, pass --allow_untrained flag.') model.to(device=device, memory_format=torch.channels_last) model.eval() if args.jit: model.atom_model = torch.jit.script(model.atom_model) model.bond_model = torch.jit.script(model.bond_model) if args.steps == -1: args.steps = 1 with autocast(enabled=args.amp): for i in range(args.steps): perf_logger.update() results = infer( model, config, ln_var=ln_var, temp=args.temperature, batch_size=args.batch_size, device=device) if (i + 1) % args.log_interval == 0: perf_logger.summarize(step=(0, i, i)) if args.predictions_path: mols_batch = convert_predictions_to_mols(*results, correct_validity=args.correct_validity) for mol in mols_batch: smiles_writer.write(mol) perf_logger.summarize(step=tuple()) if args.predictions_path: smiles_writer.close()
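The sampling step inside `infer` is a temperature-scaled Gaussian draw in latent space. The sketch below reproduces that arithmetic in isolation, with the flow's `reverse` pass omitted and the sizes chosen purely for illustration (they are not a real MoFlow configuration).

```python
import math
import torch

batch_size, z_dim = 4, 10          # illustrative sizes, not a MoFlow config
temperature, ln_var = 0.6, 0.0
mu = torch.zeros(z_dim)

# sigma = temperature * sqrt(exp(ln_var)), exactly as in infer()
sigma = temperature * math.sqrt(math.exp(ln_var))
z = torch.normal(mu.reshape(-1, z_dim).repeat((batch_size, 1)), sigma)
print(z.shape)  # torch.Size([4, 10]); each row would be decoded by model.reverse(z)
```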
TensorFlow/Detection/SSD/models/research/slim/nets/mobilenet
mobilenet
README
# MobileNetV2

This folder contains building code for MobileNetV2, based on
[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381).

# Performance

## Latency

This is the timing of [MobileNetV1](../mobilenet_v1.md) vs MobileNetV2 using TF-Lite on the large core of Pixel 1 phone.

![mnet_v1_vs_v2_pixel1_latency.png](mnet_v1_vs_v2_pixel1_latency.png)

## MACs

MACs, also sometimes known as MADDs (the number of multiply-accumulates needed to compute an inference on a single image), is a common metric for measuring model efficiency. Below is a graph comparing V2 against a few selected networks. The size of each blob represents the number of parameters. Note that for [ShuffleNet](https://arxiv.org/abs/1707.01083) there are no published size numbers; we estimate it to be comparable to MobileNetV2.

![madds_top1_accuracy](madds_top1_accuracy.png)

# Pretrained models

## Imagenet Checkpoints

Classification Checkpoint | MACs (M) | Parameters (M) | Top 1 Accuracy | Top 5 Accuracy | Mobile CPU (ms) Pixel 1
---------------------------|---------|---------------|---------|----|-------------|
[mobilenet_v2_1.4_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz) | 582 | 6.06 | 75.0 | 92.5 | 138.0 |
[mobilenet_v2_1.3_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.3_224.tgz) | 509 | 5.34 | 74.4 | 92.1 | 123.0 |
[mobilenet_v2_1.0_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_224.tgz) | 300 | 3.47 | 71.8 | 91.0 | 73.8 |
[mobilenet_v2_1.0_192](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_192.tgz) | 221 | 3.47 | 70.7 | 90.1 | 55.1 |
[mobilenet_v2_1.0_160](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_160.tgz) | 154 | 3.47 | 68.8 | 89.0 | 40.2 |
[mobilenet_v2_1.0_128](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_128.tgz) | 99 | 3.47 | 65.3 | 86.9 | 27.6 |
[mobilenet_v2_1.0_96](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_96.tgz) | 56 | 3.47 | 60.3 | 83.2 | 17.6 |
[mobilenet_v2_0.75_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_224.tgz) | 209 | 2.61 | 69.8 | 89.6 | 55.8 |
[mobilenet_v2_0.75_192](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_192.tgz) | 153 | 2.61 | 68.7 | 88.9 | 41.6 |
[mobilenet_v2_0.75_160](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_160.tgz) | 107 | 2.61 | 66.4 | 87.3 | 30.4 |
[mobilenet_v2_0.75_128](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_128.tgz) | 69 | 2.61 | 63.2 | 85.3 | 21.9 |
[mobilenet_v2_0.75_96](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.75_96.tgz) | 39 | 2.61 | 58.8 | 81.6 | 14.2 |
[mobilenet_v2_0.5_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_224.tgz) | 97 | 1.95 | 65.4 | 86.4 | 28.7 |
[mobilenet_v2_0.5_192](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_192.tgz) | 71 | 1.95 | 63.9 | 85.4 | 21.1 |
[mobilenet_v2_0.5_160](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_160.tgz) | 50 | 1.95 | 61.0 | 83.2 | 14.9 |
[mobilenet_v2_0.5_128](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_128.tgz) | 32 | 1.95 | 57.7 | 80.8 | 9.9 |
[mobilenet_v2_0.5_96](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.5_96.tgz) | 18 | 1.95 | 51.2 | 75.8 | 6.4 |
[mobilenet_v2_0.35_224](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_224.tgz) | 59 | 1.66 | 60.3 | 82.9 | 19.7 |
[mobilenet_v2_0.35_192](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_192.tgz) | 43 | 1.66 | 58.2 | 81.2 | 14.6 |
[mobilenet_v2_0.35_160](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_160.tgz) | 30 | 1.66 | 55.7 | 79.1 | 10.5 |
[mobilenet_v2_0.35_128](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_128.tgz) | 20 | 1.66 | 50.8 | 75.0 | 6.9 |
[mobilenet_v2_0.35_96](https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_0.35_96.tgz) | 11 | 1.66 | 45.5 | 70.4 | 4.5 |

# Training

The numbers above can be reproduced using slim's `train_image_classifier`. Below is the set of parameters that achieves 72.0% for the full-size MobileNetV2 after about 700K steps when trained on 8 GPUs. If trained on a single GPU, full convergence is reached after 5.5M steps. Also note that the learning rate and num_epochs_per_decay both need to be adjusted depending on how many GPUs are being used, due to slim's internal averaging.

```bash
--model_name="mobilenet_v2"
--learning_rate=0.045 * NUM_GPUS #slim internally averages clones so we compensate
--preprocessing_name="inception_v2"
--label_smoothing=0.1
--moving_average_decay=0.9999
--batch_size= 96
--num_clones = NUM_GPUS # you can use any number here between 1 and 8 depending on your hardware setup.
--learning_rate_decay_factor=0.98
--num_epochs_per_decay = 2.5 / NUM_GPUS # train_image_classifier does per clone epochs
```

# Example

See this [ipython notebook](mobilenet_example.ipynb) or open and run the network directly in
[Colaboratory](https://colab.research.google.com/github/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet_example.ipynb).
PyTorch/Segmentation/MaskRCNN/pytorch/tests
tests
checkpoint
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from collections import OrderedDict import os from tempfile import TemporaryDirectory import unittest import torch from torch import nn from maskrcnn_benchmark.utils.model_serialization import load_state_dict from maskrcnn_benchmark.utils.checkpoint import Checkpointer class TestCheckpointer(unittest.TestCase): def create_model(self): return nn.Sequential(nn.Linear(2, 3), nn.Linear(3, 1)) def create_complex_model(self): m = nn.Module() m.block1 = nn.Module() m.block1.layer1 = nn.Linear(2, 3) m.layer2 = nn.Linear(3, 2) m.res = nn.Module() m.res.layer2 = nn.Linear(3, 2) state_dict = OrderedDict() state_dict["layer1.weight"] = torch.rand(3, 2) state_dict["layer1.bias"] = torch.rand(3) state_dict["layer2.weight"] = torch.rand(2, 3) state_dict["layer2.bias"] = torch.rand(2) state_dict["res.layer2.weight"] = torch.rand(2, 3) state_dict["res.layer2.bias"] = torch.rand(2) return m, state_dict def test_from_last_checkpoint_model(self): # test that loading works even if they differ by a prefix for trained_model, fresh_model in [ (self.create_model(), self.create_model()), (nn.DataParallel(self.create_model()), self.create_model()), (self.create_model(), nn.DataParallel(self.create_model())), ( nn.DataParallel(self.create_model()), nn.DataParallel(self.create_model()), ), ]: with TemporaryDirectory() as f: checkpointer = Checkpointer( trained_model, save_dir=f, save_to_disk=True ) checkpointer.save("checkpoint_file") # in the same folder fresh_checkpointer = Checkpointer(fresh_model, save_dir=f) self.assertTrue(fresh_checkpointer.has_checkpoint()) self.assertEqual( fresh_checkpointer.get_checkpoint_file(), os.path.join(f, "checkpoint_file.pth"), ) _ = fresh_checkpointer.load() for trained_p, loaded_p in zip( trained_model.parameters(), fresh_model.parameters() ): # different tensor references self.assertFalse(id(trained_p) == id(loaded_p)) # same content self.assertTrue(trained_p.equal(loaded_p)) def test_from_name_file_model(self): # test that loading works even if they differ by a prefix for trained_model, fresh_model in [ (self.create_model(), self.create_model()), (nn.DataParallel(self.create_model()), self.create_model()), (self.create_model(), nn.DataParallel(self.create_model())), ( nn.DataParallel(self.create_model()), nn.DataParallel(self.create_model()), ), ]: with TemporaryDirectory() as f: checkpointer = Checkpointer( trained_model, save_dir=f, save_to_disk=True ) checkpointer.save("checkpoint_file") # on different folders with TemporaryDirectory() as g: fresh_checkpointer = Checkpointer(fresh_model, save_dir=g) self.assertFalse(fresh_checkpointer.has_checkpoint()) self.assertEqual(fresh_checkpointer.get_checkpoint_file(), "") _ = fresh_checkpointer.load(os.path.join(f, "checkpoint_file.pth")) for trained_p, loaded_p in zip( trained_model.parameters(), fresh_model.parameters() ): # different tensor references self.assertFalse(id(trained_p) == id(loaded_p)) # same content self.assertTrue(trained_p.equal(loaded_p)) def test_complex_model_loaded(self): for add_data_parallel in [False, True]: model, state_dict = self.create_complex_model() if add_data_parallel: model = nn.DataParallel(model) load_state_dict(model, state_dict) for loaded, stored in zip(model.state_dict().values(), state_dict.values()): # different tensor references self.assertFalse(id(loaded) == id(stored)) # same content self.assertTrue(loaded.equal(stored)) if __name__ == "__main__": unittest.main()
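The prefix cases exercised above boil down to the fact that `nn.DataParallel` stores parameters under `module.`-prefixed keys. The following is a standalone sketch of that mismatch and of the simplified stripping a loader has to account for; the real `load_state_dict` in `model_serialization` does a more general suffix-matching alignment, so this is only an illustration.

```python
from collections import OrderedDict
from torch import nn

model = nn.Sequential(nn.Linear(2, 3), nn.Linear(3, 1))
wrapped = nn.DataParallel(model)

print(list(model.state_dict())[:1])    # ['0.weight']
print(list(wrapped.state_dict())[:1])  # ['module.0.weight']

# Simplified prefix stripping so a plain module can load a DataParallel checkpoint.
stripped = OrderedDict(
    (k[len("module."):] if k.startswith("module.") else k, v)
    for k, v in wrapped.state_dict().items()
)
nn.Sequential(nn.Linear(2, 3), nn.Linear(3, 1)).load_state_dict(stripped)
```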
TensorFlow2/Segmentation/MaskRCNN
MaskRCNN
main
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Training script for Mask-RCNN.""" import logging import os from argparse import Namespace from mrcnn_tf2.runtime.run import run_training, run_inference, run_evaluation from mrcnn_tf2.utils.dllogger import LoggingBackend os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' os.environ["TF_CPP_VMODULE"] = 'non_max_suppression_op=0,generate_box_proposals_op=0,executor=0' import dllogger from mrcnn_tf2.arguments import PARSER from mrcnn_tf2.config import CONFIG from mrcnn_tf2.dataset import Dataset def main(): # setup params arguments = PARSER.parse_args() params = Namespace(**{**vars(CONFIG), **vars(arguments)}) # setup logging # noinspection PyArgumentList logging.basicConfig( level=logging.DEBUG if params.verbose else logging.INFO, format='{asctime} {levelname:.1} {name:15} {message}', style='{' ) # remove custom tf handler that logs to stderr logging.getLogger('tensorflow').setLevel(logging.WARNING) logging.getLogger('tensorflow').handlers.clear() # setup dllogger dllogger.init(backends=[ dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=params.log_file, append=True), LoggingBackend(verbosity=dllogger.Verbosity.VERBOSE) ]) dllogger.log(step='PARAMETER', data=vars(params)) # setup dataset dataset = Dataset(params) if params.mode == 'train': run_training(dataset, params) if params.mode == 'eval': run_evaluation(dataset, params) if params.mode == 'infer': run_inference(dataset, params) if __name__ == '__main__': main()
PaddlePaddle/LanguageModeling/BERT/utils
utils
logger
# Copyright (c) 2021 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import paddle.distributed as dist import dllogger def format_step(step): """ Define prefix for different prefix message for dllogger. Args: step(str|tuple): Dllogger step format. Returns: s(str): String to print in log. """ if isinstance(step, str): return step s = "" if len(step) > 0: s += f"Epoch: {step[0]} " if len(step) > 1: s += f"Step: {step[1]} " if len(step) > 2: s += f"Validation Iteration: {step[2]} " if len(step) == 0: s = "Summary:" return s def setup_loggers(log_file): """ Setup logging and dllogger. Args: log_file(str): Path to log file. """ logging.basicConfig( level=logging.DEBUG, format='{asctime}:{levelname}: {message}', style='{') if dist.get_rank() == 0: dllogger.init(backends=[ dllogger.StdOutBackend( dllogger.Verbosity.DEFAULT, step_format=format_step), dllogger.JSONStreamBackend(dllogger.Verbosity.VERBOSE, log_file), ]) else: dllogger.init([])
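For reference, `format_step` maps dllogger's step values to human-readable prefixes. A small usage sketch follows; the import path is an assumption based on this file's location in the repository.

```python
from utils.logger import format_step  # assumed import path

print(format_step("PARAMETER"))  # 'PARAMETER' (strings pass through unchanged)
print(format_step((2,)))         # 'Epoch: 2 '
print(format_step((2, 150)))     # 'Epoch: 2 Step: 150 '
print(format_step(tuple()))      # 'Summary:'
```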
PyTorch/SpeechSynthesis/Tacotron2/phrases
phrases
phrase_8_256
The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves and the form of printed letters should be beautiful, and that their arrangement on pages. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves and the form of printed letters should be beautiful, and that their arrangement on pages. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves and the form of printed letters should be beautiful, and that their arrangement on pages. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves and the form of printed letters should be beautiful, and that their arrangement on pages. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves and the form of printed letters should be beautiful, and that their arrangement on pages. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves and the form of printed letters should be beautiful, and that their arrangement on pages. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves and the form of printed letters should be beautiful, and that their arrangement on pages. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves and the form of printed letters should be beautiful, and that their arrangement on pages.
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/layers
layers
smooth_l1_loss
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch


# TODO maybe push this to nn?
def smooth_l1_loss(input, target, beta=1. / 9, size_average=True):
    """
    very similar to the smooth_l1_loss from pytorch, but with
    the extra beta parameter
    """
    n = torch.abs(input - target)
    cond = n < beta
    loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)
    if size_average:
        return loss.mean()
    return loss.sum()
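A quick numeric check of the piecewise definition: with the default beta = 1/9, absolute differences below beta take the quadratic branch and larger ones the linear branch. The sketch assumes `smooth_l1_loss` above is in scope (e.g. imported from this module).

```python
import torch

pred = torch.tensor([0.05, 1.00])
target = torch.zeros(2)

# 0.5 * 0.05**2 / (1/9) = 0.01125   (quadratic branch, |diff| <  beta)
# 1.00 - 0.5 * (1/9)    ~ 0.94444   (linear branch,    |diff| >= beta)
total = smooth_l1_loss(pred, target, beta=1. / 9, size_average=False)
print(total)  # tensor(0.9557) -- the sum of the two per-element losses
```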
PyTorch/Classification/GPUNet/triton/05ms-D/runner
runner
__main__
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import pathlib from typing import List if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from ...runner.config import Config from ...runner.executor import Executor from ...runner.finalizer import ExperimentFinalizer from ...runner.maintainer import DockerMaintainer from ...runner.preparer import ExperimentPreparer from ...runner.runner_proxy import RunnerProxy from .pipeline_impl import pipeline class ExperimentRunner(RunnerProxy): """ Experiment Runner proxy for runner wrapper """ maintainer_cls = DockerMaintainer executor_cls = Executor preparer_cls = ExperimentPreparer finalizer_cls = ExperimentFinalizer def execute(config_path: str, devices: List[str]): if len(devices) == 0: devices = ["0"] config = Config.from_file(config_path) runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices) runner.start() if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.") parser.add_argument( "--devices", type=str, nargs="*", required=False, help="Path to configuration file with details." ) args = parser.parse_args() config_path = args.config_path devices = args.devices execute(config_path, devices)
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/data_transformer
data_transformer
ctab_data_transformer
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import pandas as pd import torch from sklearn.mixture import BayesianGaussianMixture from syngen.utils.types import ColumnType from syngen.generator.tabular.data_transformer.base_data_transformer import ( BaseDataTransformer, ) class CTABDataTransformer(BaseDataTransformer): """ Data transformer for CTAB generator. Adopted from: https://github.com/zhao-zilong/CTAB-GAN """ def __init__( self, categorical_columns=(), mixed_dict={}, n_clusters=10, eps=0.005 ): self.meta = None self.n_clusters = n_clusters self.eps = eps self.categorical_columns = categorical_columns self.mixed_columns = mixed_dict def get_metadata(self, train_data): meta = [] for index, column_name in enumerate(train_data.columns): column = train_data.iloc[:, index] if index in self.categorical_columns: mapper = column.value_counts().index.tolist() meta.append( { "name": index, "type": ColumnType.CATEGORICAL, "size": len(mapper), "i2s": mapper, } ) elif index in self.mixed_columns.keys(): meta.append( { "name": index, "type": ColumnType.MIXED, "min": column.min(), "max": column.max(), "modal": self.mixed_columns[index], } ) else: meta.append( { "name": index, "type": ColumnType.CONTINUOUS, "min": column.min(), "max": column.max(), } ) return meta def fit(self, train_data: pd.DataFrame): data = train_data.values self.meta = self.get_metadata(train_data) model = [] self.ordering = [] self.output_info = [] self.output_dim = 0 self.components = [] self.filter_arr = [] for id_, info in enumerate(self.meta): if info["type"] == ColumnType.CONTINUOUS: gm = BayesianGaussianMixture( self.n_clusters, weight_concentration_prior_type="dirichlet_process", weight_concentration_prior=0.001, max_iter=100, n_init=1, random_state=42, ) gm.fit(data[:, id_].reshape([-1, 1])) mode_freq = ( pd.Series(gm.predict(data[:, id_].reshape([-1, 1]))) .value_counts() .keys() ) model.append(gm) old_comp = gm.weights_ > self.eps comp = [] for i in range(self.n_clusters): if (i in (mode_freq)) & old_comp[i]: comp.append(True) else: comp.append(False) self.components.append(comp) self.output_info += [(1, "tanh"), (np.sum(comp), "softmax")] self.output_dim += 1 + np.sum(comp) elif info["type"] == ColumnType.MIXED: gm1 = BayesianGaussianMixture( self.n_clusters, weight_concentration_prior_type="dirichlet_process", weight_concentration_prior=0.001, max_iter=100, n_init=1, random_state=42, ) gm2 = BayesianGaussianMixture( self.n_clusters, weight_concentration_prior_type="dirichlet_process", weight_concentration_prior=0.001, max_iter=100, n_init=1, random_state=42, ) gm1.fit(data[:, id_].reshape([-1, 1])) filter_arr = [] for element in data[:, id_]: if element not in info["modal"]: filter_arr.append(True) else: filter_arr.append(False) gm2.fit(data[:, id_][filter_arr].reshape([-1, 1])) mode_freq = ( pd.Series( gm2.predict(data[:, id_][filter_arr].reshape([-1, 1])) ) .value_counts() .keys() ) self.filter_arr.append(filter_arr) model.append((gm1, gm2)) 
old_comp = gm2.weights_ > self.eps comp = [] for i in range(self.n_clusters): if (i in (mode_freq)) & old_comp[i]: comp.append(True) else: comp.append(False) self.components.append(comp) self.output_info += [ (1, "tanh"), (np.sum(comp) + len(info["modal"]), "softmax"), ] self.output_dim += 1 + np.sum(comp) + len(info["modal"]) else: model.append(None) self.components.append(None) self.output_info += [(info["size"], "softmax")] self.output_dim += info["size"] self.model = model def transform(self, data, ispositive=False, positive_list=None): values = [] mixed_counter = 0 for id_, info in enumerate(self.meta): current = data[:, id_] if info["type"] == ColumnType.CONTINUOUS: current = current.reshape([-1, 1]) means = self.model[id_].means_.reshape((1, self.n_clusters)) stds = np.sqrt(self.model[id_].covariances_).reshape( (1, self.n_clusters) ) features = np.empty(shape=(len(current), self.n_clusters)) if ispositive: if id_ in positive_list: features = np.abs(current - means) / (4 * stds) else: features = (current - means) / (4 * stds) probs = self.model[id_].predict_proba(current.reshape([-1, 1])) n_opts = sum(self.components[id_]) features = features[:, self.components[id_]] probs = probs[:, self.components[id_]] opt_sel = np.zeros(len(data), dtype="int") for i in range(len(data)): pp = probs[i] + 1e-6 pp = pp / sum(pp) opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp) idx = np.arange((len(features))) features = features[idx, opt_sel].reshape([-1, 1]) features = np.clip(features, -0.99, 0.99) probs_onehot = np.zeros_like(probs) probs_onehot[np.arange(len(probs)), opt_sel] = 1 re_ordered_phot = np.zeros_like(probs_onehot) col_sums = probs_onehot.sum(axis=0) n = probs_onehot.shape[1] largest_indices = np.argsort(-1 * col_sums)[:n] self.ordering.append(largest_indices) for id, val in enumerate(largest_indices): re_ordered_phot[:, id] = probs_onehot[:, val] values += [features, re_ordered_phot] elif info["type"] == "mixed": means_0 = self.model[id_][0].means_.reshape([-1]) stds_0 = np.sqrt(self.model[id_][0].covariances_).reshape([-1]) zero_std_list = [] means_needed = [] stds_needed = [] for mode in info["modal"]: if mode != -9999999: dist = [] for idx, val in enumerate(list(means_0.flatten())): dist.append(abs(mode - val)) index_min = np.argmin(np.array(dist)) zero_std_list.append(index_min) else: continue for idx in zero_std_list: means_needed.append(means_0[idx]) stds_needed.append(stds_0[idx]) mode_vals = [] for i, j, k in zip(info["modal"], means_needed, stds_needed): this_val = np.abs(i - j) / (4 * k) mode_vals.append(this_val) if -9999999 in info["modal"]: mode_vals.append(0) current = current.reshape([-1, 1]) filter_arr = self.filter_arr[mixed_counter] current = current[filter_arr] means = self.model[id_][1].means_.reshape((1, self.n_clusters)) stds = np.sqrt(self.model[id_][1].covariances_).reshape( (1, self.n_clusters) ) features = np.empty(shape=(len(current), self.n_clusters)) if ispositive: if id_ in positive_list: features = np.abs(current - means) / (4 * stds) else: features = (current - means) / (4 * stds) probs = self.model[id_][1].predict_proba( current.reshape([-1, 1]) ) n_opts = sum(self.components[id_]) # 8 features = features[:, self.components[id_]] probs = probs[:, self.components[id_]] opt_sel = np.zeros(len(current), dtype="int") for i in range(len(current)): pp = probs[i] + 1e-6 pp = pp / sum(pp) opt_sel[i] = np.random.choice(np.arange(n_opts), p=pp) idx = np.arange((len(features))) features = features[idx, opt_sel].reshape([-1, 1]) features = np.clip(features, 
-0.99, 0.99) probs_onehot = np.zeros_like(probs) probs_onehot[np.arange(len(probs)), opt_sel] = 1 extra_bits = np.zeros([len(current), len(info["modal"])]) temp_probs_onehot = np.concatenate( [extra_bits, probs_onehot], axis=1 ) final = np.zeros( [len(data), 1 + probs_onehot.shape[1] + len(info["modal"])] ) features_curser = 0 for idx, val in enumerate(data[:, id_]): if val in info["modal"]: category_ = list(map(info["modal"].index, [val]))[0] final[idx, 0] = mode_vals[category_] final[idx, (category_ + 1)] = 1 else: final[idx, 0] = features[features_curser] final[ idx, (1 + len(info["modal"])) : ] = temp_probs_onehot[features_curser][ len(info["modal"]) : ] features_curser = features_curser + 1 just_onehot = final[:, 1:] re_ordered_jhot = np.zeros_like(just_onehot) n = just_onehot.shape[1] col_sums = just_onehot.sum(axis=0) largest_indices = np.argsort(-1 * col_sums)[:n] self.ordering.append(largest_indices) for id, val in enumerate(largest_indices): re_ordered_jhot[:, id] = just_onehot[:, val] final_features = final[:, 0].reshape([-1, 1]) values += [final_features, re_ordered_jhot] mixed_counter = mixed_counter + 1 else: self.ordering.append(None) col_t = np.zeros([len(data), info["size"]]) idx = list(map(info["i2s"].index, current)) col_t[np.arange(len(data)), idx] = 1 values.append(col_t) return np.concatenate(values, axis=1) def inverse_transform(self, data): data_t = np.zeros([len(data), len(self.meta)]) st = 0 for id_, info in enumerate(self.meta): if info["type"] == ColumnType.CONTINUOUS: u = data[:, st] v = data[:, st + 1 : st + 1 + np.sum(self.components[id_])] order = self.ordering[id_] v_re_ordered = np.zeros_like(v) for id, val in enumerate(order): v_re_ordered[:, val] = v[:, id] v = v_re_ordered u = np.clip(u, -1, 1) v_t = np.ones((data.shape[0], self.n_clusters)) * -100 v_t[:, self.components[id_]] = v v = v_t st += 1 + np.sum(self.components[id_]) means = self.model[id_].means_.reshape([-1]) stds = np.sqrt(self.model[id_].covariances_).reshape([-1]) p_argmax = np.argmax(v, axis=1) std_t = stds[p_argmax] mean_t = means[p_argmax] tmp = u * 4 * std_t + mean_t data_t[:, id_] = tmp elif info["type"] == "mixed": u = data[:, st] full_v = data[ :, (st + 1) : (st + 1) + len(info["modal"]) + np.sum(self.components[id_]), ] order = self.ordering[id_] full_v_re_ordered = np.zeros_like(full_v) for id, val in enumerate(order): full_v_re_ordered[:, val] = full_v[:, id] full_v = full_v_re_ordered mixed_v = full_v[:, : len(info["modal"])] v = full_v[:, -np.sum(self.components[id_]) :] u = np.clip(u, -1, 1) v_t = np.ones((data.shape[0], self.n_clusters)) * -100 v_t[:, self.components[id_]] = v v = np.concatenate([mixed_v, v_t], axis=1) st += 1 + np.sum(self.components[id_]) + len(info["modal"]) means = self.model[id_][1].means_.reshape([-1]) stds = np.sqrt(self.model[id_][1].covariances_).reshape([-1]) p_argmax = np.argmax(v, axis=1) result = np.zeros_like(u) for idx in range(len(data)): if p_argmax[idx] < len(info["modal"]): argmax_value = p_argmax[idx] result[idx] = float( list( map(info["modal"].__getitem__, [argmax_value]) )[0] ) else: std_t = stds[(p_argmax[idx] - len(info["modal"]))] mean_t = means[(p_argmax[idx] - len(info["modal"]))] result[idx] = u[idx] * 4 * std_t + mean_t data_t[:, id_] = result else: current = data[:, st : st + info["size"]] st += info["size"] idx = np.argmax(current, axis=1) data_t[:, id_] = list(map(info["i2s"].__getitem__, idx)) return data_t class ImageTransformer(BaseDataTransformer): def __init__(self, side): self.height = side def transform(self, data): if 
self.height * self.height > len(data[0]): padding = torch.zeros( (len(data), self.height * self.height - len(data[0])) ).to(data.device) data = torch.cat([data, padding], axis=1) return data.view(-1, 1, self.height, self.height) def inverse_transform(self, data): data = data.view(-1, self.height * self.height) return data
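The continuous-column handling above is the mode-specific normalization idea: fit a Bayesian Gaussian mixture per column, attribute each value to one active mode, and encode it as a scalar offset plus a one-hot mode indicator. Below is a standalone, simplified sketch of that encoding on synthetic data; it picks the mode with `predict` instead of sampling from `predict_proba`, and it skips the mixed and categorical branches handled by the transformer.

```python
import numpy as np
from sklearn.mixture import BayesianGaussianMixture

rng = np.random.default_rng(0)
x = np.concatenate([rng.normal(0, 1, 500), rng.normal(10, 2, 500)]).reshape(-1, 1)

gm = BayesianGaussianMixture(
    n_components=10,
    weight_concentration_prior_type="dirichlet_process",
    weight_concentration_prior=0.001,
    max_iter=100, n_init=1, random_state=42,
)
gm.fit(x)

modes = gm.predict(x)                              # one mode index per value
means = gm.means_.reshape(-1)[modes]
stds = np.sqrt(gm.covariances_).reshape(-1)[modes]
alpha = np.clip((x.reshape(-1) - means) / (4 * stds), -0.99, 0.99)

one_hot = np.zeros((len(x), 10))
one_hot[np.arange(len(x)), modes] = 1
encoded = np.column_stack([alpha, one_hot])        # (1 + n_modes) features per value
print(encoded.shape)                               # (1000, 11)
```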
TensorFlow2/Recommendation/DLRM_and_DCNv2
DLRM_and_DCNv2
slurm_multinode
#!/bin/bash

# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author: Tomasz Grel (tgrel@nvidia.com)


# This is a generic SLURM batch script. It runs $cmd
# command in $cont docker image while mounting $mounts directories.
# You can use the $srun_flags variable to pass additional
# arguments to srun.
#
# It is designed to work with enroot/pyxis, but could be modified
# to run on bare-metal machines as well.
#
# Example usage to train a 1.68TB DLRM variant using 32xA100-80GB GPUs on 4 nodes:
#
# cmd='numactl --interleave=all -- python -u main.py --dataset_path /data/dlrm/full_criteo_data --amp \
#     --embedding_dim 512 --bottom_mlp_dims 512,256,512' \
#     srun_flags='--mpi=pmix' \
#     cont=dlrm_tf_adam \
#     mounts=/data/dlrm:/data/dlrm \
#     sbatch -n 32 -N 4 -t 00:20:00 slurm_multinode.sh
#

srun --mpi=none ${srun_flags} --ntasks-per-node=1 \
     --container-image="${cont}" --container-mounts=${mounts} /bin/bash -c "$cmd"
PyTorch/LanguageModeling/BERT/triton
triton
dataloader
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from run_squad import convert_examples_to_features, read_squad_examples from tokenization import BertTokenizer def get_dataloader_fn( precision : str = 'fp32', batch_size: int = 8, vocab_file: str = "", do_lower_case: bool = True, predict_file: str = "", max_len: int = 512, max_seq_length: int = 384, doc_stride: int = 128, max_query_length: int = 64, version_2_with_negative: bool = False, pad_to_batch_size: bool = True, ): # Preprocess input data tokenizer = BertTokenizer(vocab_file, do_lower_case=do_lower_case, max_len=max_len) eval_examples = read_squad_examples( input_file=predict_file, is_training=False, version_2_with_negative=version_2_with_negative ) eval_features = convert_examples_to_features( examples=eval_examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=doc_stride, max_query_length=max_query_length, is_training=False, ) # get inputs all_unique_ids = [f.unique_id for f in eval_features] all_input_ids = [f.input_ids for f in eval_features] all_input_mask = [f.input_mask for f in eval_features] all_segment_ids = [f.segment_ids for f in eval_features] if pad_to_batch_size: # each batch should have a fixed size f = eval_features[-1] padding = batch_size - (len(all_unique_ids) % batch_size) all_unique_ids += [f.unique_id for _ in range(padding)] all_input_ids += [f.input_ids for _ in range(padding)] all_input_mask += [f.input_mask for _ in range(padding)] all_segment_ids += [f.segment_ids for _ in range(padding)] all_unique_ids = torch.tensor(all_unique_ids, dtype=torch.int32, requires_grad=False) all_input_ids = torch.tensor(all_input_ids, dtype=torch.int32, requires_grad=False) all_input_mask = torch.tensor(all_input_mask, dtype=torch.int32, requires_grad=False) all_segment_ids = torch.tensor(all_segment_ids, dtype=torch.int32, requires_grad=False) eval_data = torch.utils.data.TensorDataset(all_unique_ids, all_input_ids, all_input_mask, all_segment_ids) eval_sampler = torch.utils.data.SequentialSampler(eval_data) eval_dataloader = torch.utils.data.DataLoader( eval_data, sampler=eval_sampler, batch_size=batch_size, shuffle=False, num_workers=0, ) dtype = { 'fp32' : np.float32, 'fp16' : np.float16 } dtype = dtype[precision] def _get_dataloader(): """return dataloader for inference""" for unique_id, input_ids, input_mask, segment_ids in eval_dataloader: unique_id = unique_id.cpu().numpy() input_ids = input_ids.cpu().numpy() input_mask = input_mask.cpu().numpy() segment_ids = segment_ids.cpu().numpy() x = {"input__0": input_ids, "input__1": segment_ids, "input__2": input_mask} y_real = { "output__0": np.zeros([batch_size, max_seq_length], dtype=dtype), "output__1": np.zeros([batch_size, max_seq_length], dtype=dtype), } yield (unique_id, x, y_real) return _get_dataloader
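The `pad_to_batch_size` branch keeps every batch full by duplicating the last feature; the arithmetic in isolation looks like the sketch below. Note (an observation, not part of the original file) that when the feature count is already a multiple of the batch size, the expression appends one extra, fully duplicated batch, which is harmless for latency measurement but worth remembering when aggregating predictions.

```python
def num_padding_examples(n_features: int, batch_size: int) -> int:
    """Mirror of `padding = batch_size - (len(all_unique_ids) % batch_size)`."""
    return batch_size - (n_features % batch_size)

print(num_padding_examples(19, 8))  # 5 -> 24 items total, 3 full batches
print(num_padding_examples(16, 8))  # 8 -> 24 items total, the last batch is pure duplicates
```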
PyTorch/SpeechRecognition/Jasper/platform
platform
DGX2_Jasper_FP32_16GPU
#!/bin/bash

NUM_GPUS=16 BATCH_SIZE=64 GRAD_ACCUMULATION_STEPS=1 bash scripts/train.sh "$@"
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts
scripts
waveglow_to_onnx
#!/usr/bin/env python3 ## # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of the NVIDIA CORPORATION nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import json import sys import onnx import numpy as np from scipy.io.wavfile import write import argparse import torch args = None def convert_conv_1d_to_2d(conv1d): conv2d = torch.nn.Conv2d(conv1d.weight.size(1), conv1d.weight.size(0), (conv1d.weight.size(2), 1), stride=(conv1d.stride[0], 1), dilation=(conv1d.dilation[0], 1), padding=(conv1d.padding[0], 0)) conv2d.weight.data[:, :, :, 0] = conv1d.weight.data conv2d.bias.data = conv1d.bias.data return conv2d def convert_WN_1d_to_2d_(WN): """ Modifies the WaveNet like affine coupling layer in-place to use 2-d convolutions """ WN.start = convert_conv_1d_to_2d(WN.start) WN.end = convert_conv_1d_to_2d(WN.end) for i in range(len(WN.in_layers)): WN.in_layers[i] = convert_conv_1d_to_2d(WN.in_layers[i]) for i in range(len(WN.res_skip_layers)): WN.res_skip_layers[i] = convert_conv_1d_to_2d(WN.res_skip_layers[i]) for i in range(len(WN.res_skip_layers)): WN.cond_layers[i] = convert_conv_1d_to_2d(WN.cond_layers[i]) def convert_convinv_1d_to_2d(convinv): """ Takes an invertible 1x1 1-d convolution and returns a 2-d convolution that does the inverse """ conv2d = torch.nn.Conv2d(convinv.W_inverse.size(1), convinv.W_inverse.size(0), 1, bias=False) conv2d.weight.data[:, :, :, 0] = convinv.W_inverse.data return conv2d def convert_1d_to_2d_(glow): """ Caffe2 and TensorRT don't seem to support 1-d convolutions or properly convert ONNX exports with 1d convolutions to 2d convolutions yet, so we do the conversion to 2-d convolutions before ONNX export """ # Convert upsample to 2d upsample = torch.nn.ConvTranspose2d(glow.upsample.weight.size(0), glow.upsample.weight.size(1), (glow.upsample.weight.size(2), 1), stride=(glow.upsample.stride[0], 1)) upsample.weight.data[:, :, :, 0] = glow.upsample.weight.data upsample.bias.data = glow.upsample.bias.data glow.upsample = upsample # Convert WN to 2d for WN in glow.WN: convert_WN_1d_to_2d_(WN) # Convert invertible conv to 2d for i in range(len(glow.convinv)): glow.convinv[i] = 
convert_convinv_1d_to_2d(glow.convinv[i]) def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): in_act = input_a+input_b in_left = in_act[:, 0:n_channels, :, :] in_right = in_act[:, n_channels:2*n_channels, :, :] t_act = torch.tanh(in_left) s_act = torch.sigmoid(in_right) acts = t_act * s_act return acts def WN_forward(self, forward_input): """ This is a forward replacement for the WN forward. This is required because the code was written for 1d convs which isn't yet supported from ONNX exports. """ audio, spect = forward_input audio = self.start(audio) for i in range(self.n_layers): acts = fused_add_tanh_sigmoid_multiply( self.in_layers[i](audio), self.cond_layers[i](spect), self.n_channels) res_skip_acts = self.res_skip_layers[i](acts) if i < self.n_layers - 1: audio = res_skip_acts[:, 0:self.n_channels, :, :] + audio skip_acts = res_skip_acts[:, self.n_channels:2*self.n_channels, :, :] else: skip_acts = res_skip_acts if i == 0: output = skip_acts else: output = skip_acts + output return self.end(output) def infer_o(self, spect, z): """ In order to for the trace to work running through ONNX with 2d convolutions we need to overwrite the forward method. All shape information is pre-calculated so ONNX doesn't export "Dynamic" outputs which are not yet suported by TensorRT """ batch_size = spect.size(0) spect = spect.permute(0, 3, 2, 1).contiguous() spect = self.upsample(spect) spect = torch.squeeze(spect, 3) spect = spect.view(batch_size, self.upsample_weight_size, self.length_spect_group, self.n_group) spect = spect.permute(0, 2, 1, 3) spect = spect.contiguous() spect = spect.view(batch_size, self.length_spect_group, self.upsample_weight_size*self.n_group) spect = spect.permute(0, 2, 1) spect = torch.unsqueeze(spect, 3) spect = spect.contiguous() audio = z[:, :self.n_remaining_channels, :, :] z = z[:, self.n_remaining_channels:self.n_group, :, :] for k in reversed(range(self.n_flows)): n_half = self.n_halves[k] audio_0 = audio[:, 0:n_half, :, :] audio_1 = audio[:, n_half:2*n_half, :, :] output = self.WN[k]((audio_0, spect)) s = output[:, n_half:2*n_half, :, :] b = output[:, 0:n_half, :, :] audio_1 = (audio_1 - b)/torch.exp(s) audio_0 = audio_0.expand(audio_1.size(0), audio_0.size(1), audio_0.size(2), audio_0.size(3)) audio = torch.cat([audio_0, audio_1], 1) audio = self.convinv[k](audio) if k % self.n_early_every == 0 and k > 0: zb = z[:, 0:self.n_early_size, :, :].expand(audio.size(0), self.n_early_size, z.size(2), z.size(3)) audio = torch.cat((zb, audio), 1) z = z[:, self.n_early_size:self.n_group - self.n_remaining_channels, :, :] audio = torch.squeeze(audio, 3) audio = audio.permute(0, 2, 1).contiguous().view( audio.size(0), (self.length_spect_group * self.n_group)) return audio def main(waveglow_path, output_path, batch_size, length_mels): """ Takes a waveglow model, a batch size, and a length in mels about outputs a static ONNX representation using 2D convoultions """ torch.manual_seed(0) model = load_waveglow(waveglow_path, waveglow_config) length_spect = length_mels length_samples = 768 + 256*length_spect model.upsample_weight_size = model.upsample.weight.size(0) spect = torch.cuda.FloatTensor( batch_size, model.upsample_weight_size, length_spect).normal_() spect = torch.autograd.Variable(spect.cuda(), requires_grad=False) # Run inference because it forces inverses to be calculated with torch.no_grad(): test_out1 = model.infer(spect) assert(length_samples % model.n_group == 0) model.length_spect_group = int(length_samples / model.n_group) # Pre-calculating the sizes of 
noise to use so it's not dynamic n_halves = [] n_half = int(model.n_remaining_channels/2) for k in reversed(range(model.n_flows)): n_halves.append(n_half) if k % model.n_early_every == 0 and k > 0: n_half = n_half + int(model.n_early_size/2) n_halves.reverse() model.n_halves = n_halves spect = torch.cuda.FloatTensor( batch_size, 1, length_spect, model.upsample.weight.size(0)).normal_() z = torch.cuda.FloatTensor( 1, model.n_group, model.length_spect_group, 1).normal_() spect = torch.autograd.Variable(spect.cuda(), requires_grad=False) z = torch.autograd.Variable(z, requires_grad=False) # Replace old forward with inference glow.WaveGlow.forward = infer_o #glow.WN.forward = WN_forward # Convert whole model to 2d convolutions convert_1d_to_2d_(model) model.cuda() # Get output for comparison with Caffe2 with torch.no_grad(): test_out2 = model(spect, z) # Export model torch.onnx.export(model, (spect, z), output_path, dynamic_axes={'spect': {0: 'batch_size'}, 'audio': {0: 'batch_size'}}, input_names=['spect', 'z'], output_names=['audio'], opset_version=10, verbose=True) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-w', '--waveglow_path', help='Path to waveglow decoder checkpoint with model', required=True) parser.add_argument('-W', '--tacotron2_home', help='Path to DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2 directory.', required=True) parser.add_argument('-o', "--onnx_path", help="Path to output ONNX file", required=True) parser.add_argument("--batch_size", default=1, type=int) parser.add_argument("--length_mels", default=160, type=int) # add wave glow arguments waveglow = parser.add_argument_group("WaveGlow parameters") waveglow.add_argument('--n-mel-channels', default=80, type=int, help='Number of bins in mel-spectrograms') # glow parameters waveglow.add_argument('--flows', default=12, type=int, help='Number of steps of flow') waveglow.add_argument('--groups', default=8, type=int, help='Number of samples in a group processed by the steps of flow') waveglow.add_argument('--early-every', default=4, type=int, help='Determines how often (i.e., after how many coupling layers) \ a number of channels (defined by --early-size parameter) are output\ to the loss function') waveglow.add_argument('--early-size', default=2, type=int, help='Number of channels output to the loss function') waveglow.add_argument('--sigma', default=1.0, type=float, help='Standard deviation used for sampling from Gaussian') waveglow.add_argument('--segment-length', default=4000, type=int, help='Segment length (audio samples) processed per iteration') # wavenet parameters wavenet = waveglow.add_argument_group('WaveNet parameters') wavenet.add_argument('--wn-kernel-size', default=3, type=int, help='Kernel size for dialted convolution in the affine coupling layer (WN)') wavenet.add_argument('--wn-channels', default=256, type=int, help='Number of channels in WN') wavenet.add_argument('--wn-layers', default=8, type=int, help='Number of layers in WN') args = parser.parse_args() # do imports as needed sys.path.append(args.tacotron2_home) import waveglow.model as glow from import_utils import load_waveglow global waveglow_config waveglow_config = { "n_mel_channels": args.n_mel_channels, "n_flows": args.flows, "n_group": args.groups, "n_early_every": args.early_every, "n_early_size": args.early_size, "WN_config": { "n_layers": args.wn_layers, "kernel_size": args.wn_kernel_size, "n_channels": args.wn_channels } } main(args.waveglow_path, args.onnx_path, args.batch_size, args.length_mels)
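The core trick in this exporter, replacing every 1-d convolution with an equivalent 2-d one, can be sanity-checked in isolation: treat the length-T signal as a T-by-1 image and give the kernel a trailing size-1 axis, exactly as `convert_conv_1d_to_2d` does. A small self-contained check, independent of WaveGlow:

```python
import torch

conv1d = torch.nn.Conv1d(4, 8, kernel_size=3, padding=1)
conv2d = torch.nn.Conv2d(4, 8, kernel_size=(3, 1), padding=(1, 0))
conv2d.weight.data[:, :, :, 0] = conv1d.weight.data
conv2d.bias.data = conv1d.bias.data

x = torch.randn(2, 4, 17)                         # (batch, channels, time)
out_1d = conv1d(x)
out_2d = conv2d(x.unsqueeze(3)).squeeze(3)        # add/remove the width-1 axis
print(torch.allclose(out_1d, out_2d, atol=1e-5))  # True
```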
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/bin
bin
build_tacotron2
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "cudaUtils.h" #include "engineCache.h" #include "logging.h" #include "tacotron2Builder.h" #include "NvInfer.h" #include <iostream> #include <memory> #include <vector> using namespace nvinfer1; using namespace tts; /****************************************************************************** * HELPER FUNCTIONS *********************************************************** *****************************************************************************/ bool matches(const std::string& arg, const std::string& flag) { return arg.length() >= flag.length() && arg.substr(0, flag.length()) == flag; } int parseNumFlag( const int argc, const char** argv, const std::string& flag, int* i) { int value; const std::string arg(argv[*i]); if (arg.length() > flag.length()) { value = std::stol(arg.substr(flag.length())); } else if (*i + 1 < argc) { ++(*i); value = std::stol(argv[*i]); } else { throw std::runtime_error("Missing argument for '" + flag + "'."); } return value; } int parseAmpFlag( const int argc, const char** argv, const std::string& flag, int* i) { std::string str; const std::string arg(argv[*i]); if (arg.length() > flag.length()) { str = arg.substr(flag.length()); } else if (*i + 1 < argc) { ++(*i); str = argv[*i]; } else { throw std::runtime_error("Missing argument for '" + flag + "'."); } int value; if (str == "fp32") { value = 0; } else if (str == "amp") { value = 1; } else { throw std::runtime_error( "Invalid argument for precision (amp|fp32): " + str); } return value; } void usage(const std::string& binName) { std::cerr << "usage: " << std::endl; std::cerr << " " << binName << " <model file> <engine file> [options]\n"; std::cerr << "options:" << std::endl; std::cerr << " -I<max input length>" << std::endl; std::cerr << " -B<batch size>" << std::endl; std::cerr << " -F<precision (fp32|amp)>" << std::endl; std::cerr << " -h" << std::endl; } void parseArgs( const int argc, const char** const argv, std::string* model, std::string* enginePath, int* batchSize, int* inputLength, int* useAMP) { 
bool modelSet = false; bool enginePathSet = false; for (int i = 1; i < argc; ++i) { const std::string arg(argv[i]); if (matches(arg, "-I")) { *inputLength = parseNumFlag(argc, argv, "-I", &i); } else if (matches(arg, "-B")) { *batchSize = parseNumFlag(argc, argv, "-B", &i); } else if (matches(arg, "-F")) { *useAMP = parseAmpFlag(argc, argv, "-F", &i); } else if (matches(arg, "-h")) { usage(argv[0]); exit(0); } else { if (!modelSet) { *model = arg; modelSet = true; } else if (!enginePathSet) { *enginePath = arg; enginePathSet = true; } else { throw std::runtime_error("Unknown extra argument '" + arg + "'."); } } } } /****************************************************************************** * MAIN *********************************************************************** *****************************************************************************/ int main(int argc, const char* argv[]) { std::string tacotron2ModelPath; std::string enginePath; int batchSize = 1; int inputLength = 400; int useFP16 = true; parseArgs( argc, argv, &tacotron2ModelPath, &enginePath, &batchSize, &inputLength, &useFP16); CudaUtils::printDeviceInformation(); try { std::shared_ptr<Logger> logger(new Logger(ILogger::Severity::kERROR)); TRTPtr<IBuilder> builder(createInferBuilder(*logger)); EngineCache cache(logger); Tacotron2Builder tacotron2Builder(tacotron2ModelPath); const std::vector<TRTPtr<ICudaEngine>> engines = tacotron2Builder.build(inputLength, *builder, batchSize, useFP16); cache.save(engines, enginePath); } catch (const std::exception& e) { std::cerr << "Exception: " << e.what() << std::endl; return 1; } return 0; }
TensorFlow/LanguageModeling/BERT/biobert
biobert
conlleval
# Python version of the evaluation script from CoNLL'00- # Originates from: https://github.com/spyysalo/conlleval.py # Intentional differences: # - accept any space as delimiter by default # - optional file argument (default STDIN) # - option to set boundary (-b argument) # - LaTeX output (-l argument) not supported # - raw tags (-r argument) not supported # add function :evaluate(predicted_label, ori_label): which will not read from file import sys import re import codecs from collections import defaultdict, namedtuple ANY_SPACE = '<SPACE>' class FormatError(Exception): pass Metrics = namedtuple('Metrics', 'tp fp fn prec rec fscore') class EvalCounts(object): def __init__(self): self.correct_chunk = 0 # number of correctly identified chunks self.correct_tags = 0 # number of correct chunk tags self.found_correct = 0 # number of chunks in corpus self.found_guessed = 0 # number of identified chunks self.token_counter = 0 # token counter (ignores sentence breaks) # counts by type self.t_correct_chunk = defaultdict(int) self.t_found_correct = defaultdict(int) self.t_found_guessed = defaultdict(int) def parse_args(argv): import argparse parser = argparse.ArgumentParser( description='evaluate tagging results using CoNLL criteria', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) arg = parser.add_argument arg('-b', '--boundary', metavar='STR', default='-X-', help='sentence boundary') arg('-d', '--delimiter', metavar='CHAR', default=ANY_SPACE, help='character delimiting items in input') arg('-o', '--otag', metavar='CHAR', default='O', help='alternative outside tag') arg('file', nargs='?', default=None) return parser.parse_args(argv) def parse_tag(t): m = re.match(r'^([^-]*)-(.*)$', t) return m.groups() if m else (t, '') def evaluate(iterable, options=None): if options is None: options = parse_args([]) # use defaults counts = EvalCounts() num_features = None # number of features per line in_correct = False # currently processed chunks is correct until now last_correct = 'O' # previous chunk tag in corpus last_correct_type = '' # type of previously identified chunk tag last_guessed = 'O' # previously identified chunk tag last_guessed_type = '' # type of previous chunk tag in corpus for i, line in enumerate(iterable): line = line.rstrip('\r\n') # print(line) if options.delimiter == ANY_SPACE: features = line.split() else: features = line.split(options.delimiter) if num_features is None: num_features = len(features) elif num_features != len(features) and len(features) != 0: raise FormatError('unexpected number of features: %d (%d) at line %d\n%s' % (len(features), num_features, i, line)) if len(features) == 0 or features[0] == options.boundary: features = [options.boundary, 'O', 'O'] if len(features) < 3: raise FormatError('unexpected number of features in line %s' % line) guessed, guessed_type = parse_tag(features.pop()) correct, correct_type = parse_tag(features.pop()) first_item = features.pop(0) if first_item == options.boundary: guessed = 'O' end_correct = end_of_chunk(last_correct, correct, last_correct_type, correct_type) end_guessed = end_of_chunk(last_guessed, guessed, last_guessed_type, guessed_type) start_correct = start_of_chunk(last_correct, correct, last_correct_type, correct_type) start_guessed = start_of_chunk(last_guessed, guessed, last_guessed_type, guessed_type) if in_correct: if (end_correct and end_guessed and last_guessed_type == last_correct_type): in_correct = False counts.correct_chunk += 1 counts.t_correct_chunk[last_correct_type] += 1 elif (end_correct != end_guessed 
or guessed_type != correct_type): in_correct = False if start_correct and start_guessed and guessed_type == correct_type: in_correct = True if start_correct: counts.found_correct += 1 counts.t_found_correct[correct_type] += 1 if start_guessed: counts.found_guessed += 1 counts.t_found_guessed[guessed_type] += 1 if first_item != options.boundary: if correct == guessed and guessed_type == correct_type: counts.correct_tags += 1 counts.token_counter += 1 last_guessed = guessed last_correct = correct last_guessed_type = guessed_type last_correct_type = correct_type if in_correct: counts.correct_chunk += 1 counts.t_correct_chunk[last_correct_type] += 1 return counts def uniq(iterable): seen = set() return [i for i in iterable if not (i in seen or seen.add(i))] def calculate_metrics(correct, guessed, total): tp, fp, fn = correct, guessed-correct, total-correct p = 0 if tp + fp == 0 else 1.*tp / (tp + fp) r = 0 if tp + fn == 0 else 1.*tp / (tp + fn) f = 0 if p + r == 0 else 2 * p * r / (p + r) return Metrics(tp, fp, fn, p, r, f) def metrics(counts): c = counts overall = calculate_metrics( c.correct_chunk, c.found_guessed, c.found_correct ) by_type = {} for t in uniq(list(c.t_found_correct) + list(c.t_found_guessed)): by_type[t] = calculate_metrics( c.t_correct_chunk[t], c.t_found_guessed[t], c.t_found_correct[t] ) return overall, by_type def report(counts, out=None): if out is None: out = sys.stdout overall, by_type = metrics(counts) c = counts out.write('processed %d tokens with %d phrases; ' % (c.token_counter, c.found_correct)) out.write('found: %d phrases; correct: %d.\n' % (c.found_guessed, c.correct_chunk)) if c.token_counter > 0: out.write('accuracy: %6.2f%%; ' % (100.*c.correct_tags/c.token_counter)) out.write('precision: %6.2f%%; ' % (100.*overall.prec)) out.write('recall: %6.2f%%; ' % (100.*overall.rec)) out.write('FB1: %6.2f\n' % (100.*overall.fscore)) for i, m in sorted(by_type.items()): out.write('%17s: ' % i) out.write('precision: %6.2f%%; ' % (100.*m.prec)) out.write('recall: %6.2f%%; ' % (100.*m.rec)) out.write('FB1: %6.2f %d\n' % (100.*m.fscore, c.t_found_guessed[i])) def report_notprint(counts): overall, by_type = metrics(counts) c = counts final_report = [] line = [] line.append('processed %d tokens with %d phrases; ' % (c.token_counter, c.found_correct)) line.append('found: %d phrases; correct: %d.\n' % (c.found_guessed, c.correct_chunk)) final_report.append("".join(line)) if c.token_counter > 0: line = [] line.append('accuracy: %6.2f%%; ' % (100.*c.correct_tags/c.token_counter)) line.append('precision: %6.2f%%; ' % (100.*overall.prec)) line.append('recall: %6.2f%%; ' % (100.*overall.rec)) line.append('FB1: %6.2f\n' % (100.*overall.fscore)) final_report.append("".join(line)) for i, m in sorted(by_type.items()): line = [] line.append('%17s: ' % i) line.append('precision: %6.2f%%; ' % (100.*m.prec)) line.append('recall: %6.2f%%; ' % (100.*m.rec)) line.append('FB1: %6.2f %d\n' % (100.*m.fscore, c.t_found_guessed[i])) final_report.append("".join(line)) return final_report def end_of_chunk(prev_tag, tag, prev_type, type_): # check if a chunk ended between the previous and current word # arguments: previous and current chunk tags, previous and current types chunk_end = False if prev_tag == 'E': chunk_end = True if prev_tag == 'S': chunk_end = True if prev_tag == 'B' and tag == 'B': chunk_end = True if prev_tag == 'B' and tag == 'S': chunk_end = True if prev_tag == 'B' and tag == 'O': chunk_end = True if prev_tag == 'I' and tag == 'B': chunk_end = True if prev_tag == 'I' and tag == 
'S': chunk_end = True if prev_tag == 'I' and tag == 'O': chunk_end = True if prev_tag != 'O' and prev_tag != '.' and prev_type != type_: chunk_end = True # these chunks are assumed to have length 1 if prev_tag == ']': chunk_end = True if prev_tag == '[': chunk_end = True return chunk_end def start_of_chunk(prev_tag, tag, prev_type, type_): # check if a chunk started between the previous and current word # arguments: previous and current chunk tags, previous and current types chunk_start = False if tag == 'B': chunk_start = True if tag == 'S': chunk_start = True if prev_tag == 'E' and tag == 'E': chunk_start = True if prev_tag == 'E' and tag == 'I': chunk_start = True if prev_tag == 'S' and tag == 'E': chunk_start = True if prev_tag == 'S' and tag == 'I': chunk_start = True if prev_tag == 'O' and tag == 'E': chunk_start = True if prev_tag == 'O' and tag == 'I': chunk_start = True if tag != 'O' and tag != '.' and prev_type != type_: chunk_start = True # these chunks are assumed to have length 1 if tag == '[': chunk_start = True if tag == ']': chunk_start = True return chunk_start def main(argv): args = parse_args(argv[1:]) if args.file is None: counts = evaluate(sys.stdin, args) else: with open(args.file) as f: counts = evaluate(f, args) report(counts) def return_report(input_file): with open(input_file, "r") as f: counts = evaluate(f) return report_notprint(counts) if __name__ == '__main__': # sys.exit(main(sys.argv)) return_report('/home/pengy6/data/sentence_similarity/data/cdr/test1/wanli_result2/label_test.txt')
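A minimal usage sketch for the evaluation helpers above, assuming the script is importable as conlleval; the tagged lines are made-up examples in the "token gold-tag predicted-tag" format the script expects, with an empty line as a sentence boundary.

# Hedged usage sketch (assumed module name: conlleval)
import conlleval

lines = [
    "West B-MISC B-MISC",
    "Indian I-MISC I-MISC",
    "all-rounder O O",
    "Phil B-PER B-PER",
    "Simmons I-PER O",
    "",  # empty line marks a sentence boundary
]

counts = conlleval.evaluate(lines)              # default options: any whitespace delimiter
conlleval.report(counts)                        # prints accuracy/precision/recall/FB1 to stdout
summary_lines = conlleval.report_notprint(counts)  # same report, returned as a list of strings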
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc/cpu
cpu
nms_cpu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include "cpu/vision.h" template <typename scalar_t> at::Tensor nms_cpu_kernel(const at::Tensor& dets, const at::Tensor& scores, const float threshold) { AT_ASSERTM(!dets.is_cuda(), "dets must be a CPU tensor"); AT_ASSERTM(!scores.is_cuda(), "scores must be a CPU tensor"); AT_ASSERTM(dets.scalar_type() == scores.scalar_type(), "dets should have the same type as scores"); if (dets.numel() == 0) { return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); } auto x1_t = dets.select(1, 0).contiguous(); auto y1_t = dets.select(1, 1).contiguous(); auto x2_t = dets.select(1, 2).contiguous(); auto y2_t = dets.select(1, 3).contiguous(); at::Tensor areas_t = (x2_t - x1_t + 1) * (y2_t - y1_t + 1); auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); auto ndets = dets.size(0); at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte).device(at::kCPU)); auto suppressed = suppressed_t.data_ptr<uint8_t>(); auto order = order_t.data_ptr<int64_t>(); auto x1 = x1_t.data_ptr<scalar_t>(); auto y1 = y1_t.data_ptr<scalar_t>(); auto x2 = x2_t.data_ptr<scalar_t>(); auto y2 = y2_t.data_ptr<scalar_t>(); auto areas = areas_t.data_ptr<scalar_t>(); for (int64_t _i = 0; _i < ndets; _i++) { auto i = order[_i]; if (suppressed[i] == 1) continue; auto ix1 = x1[i]; auto iy1 = y1[i]; auto ix2 = x2[i]; auto iy2 = y2[i]; auto iarea = areas[i]; for (int64_t _j = _i + 1; _j < ndets; _j++) { auto j = order[_j]; if (suppressed[j] == 1) continue; auto xx1 = std::max(ix1, x1[j]); auto yy1 = std::max(iy1, y1[j]); auto xx2 = std::min(ix2, x2[j]); auto yy2 = std::min(iy2, y2[j]); auto w = std::max(static_cast<scalar_t>(0), xx2 - xx1 + 1); auto h = std::max(static_cast<scalar_t>(0), yy2 - yy1 + 1); auto inter = w * h; auto ovr = inter / (iarea + areas[j] - inter); if (ovr >= threshold) suppressed[j] = 1; } } return at::nonzero(suppressed_t == 0).squeeze(1); } at::Tensor nms_cpu(const at::Tensor& dets, const at::Tensor& scores, const float threshold) { at::Tensor result; AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms", [&] { result = nms_cpu_kernel<scalar_t>(dets, scores, threshold); }); return result; }
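For readers following the kernel above, here is an illustrative NumPy re-implementation of the same greedy IoU suppression loop; it is a sketch for clarity, not the Python binding actually exposed by maskrcnn_benchmark.

# Illustrative NumPy sketch of the greedy NMS loop in nms_cpu_kernel;
# boxes are rows of [x1, y1, x2, y2]; the +1 mirrors the pixel-inclusive areas above.
import numpy as np

def nms_reference(boxes, scores, threshold):
    x1, y1, x2, y2 = boxes.T
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]          # descending score order
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou < threshold]  # suppress boxes with IoU >= threshold
    return np.array(keep, dtype=np.int64)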
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/csrc
csrc
ROIPool
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #pragma once #include "cpu/vision.h" #ifdef WITH_CUDA #include "cuda/vision.h" #endif std::tuple<at::Tensor, at::Tensor> ROIPool_forward(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width) { if (input.is_cuda()) { #ifdef WITH_CUDA return ROIPool_forward_cuda(input, rois, spatial_scale, pooled_height, pooled_width); #else AT_ERROR("Not compiled with GPU support"); #endif } AT_ERROR("Not implemented on the CPU"); } at::Tensor ROIPool_backward(const at::Tensor& grad, const at::Tensor& input, const at::Tensor& rois, const at::Tensor& argmax, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width) { if (grad.is_cuda()) { #ifdef WITH_CUDA return ROIPool_backward_cuda(grad, input, rois, argmax, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width); #else AT_ERROR("Not compiled with GPU support"); #endif } AT_ERROR("Not implemented on the CPU"); }
TensorFlow2/Recommendation/SIM/preprocessing
preprocessing
parquet_to_tfrecord
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import multiprocessing import os import pathlib from functools import partial import click import pandas as pd import numpy as np import tensorflow as tf from sim.data.feature_spec import FeatureSpec from sim.data.defaults import TRAIN_MAPPING, TEST_MAPPING, REMAINDER_FILENAME, FILES_SELECTOR # Docker image sets it to "python" for NVTabular purposes (bugfix), which slows down the script 20x os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "cpp" logging.basicConfig( level=logging.INFO, format="[%(asctime)s] %(levelname)s: %(message)s", ) def prepare_record(sample, all_feature_names, sequential_data_start, prebatch): feature = {} for idx, (f_name, data) in enumerate(zip(all_feature_names, sample.values())): if idx >= sequential_data_start: if prebatch: data = np.array(data).flatten() else: if not prebatch: data = [data] feature[f_name] = tf.train.Feature(int64_list=tf.train.Int64List(value=data)) return tf.train.Example(features=tf.train.Features(feature=feature)).SerializeToString() def save_records(output_path, records, base_output_path, feature_spec, mapping): with tf.io.TFRecordWriter(str(output_path)) as file_writer: for record_bytes in records: file_writer.write(record_bytes) feature_spec.source_spec[mapping][0][FILES_SELECTOR].append( str(output_path.relative_to(base_output_path)) ) logging.info(f'Created: {output_path}') @click.command() @click.option( "--amazon_dataset_path", required=True, help="Path to the dataset directory.", type=str, ) @click.option( "--tfrecord_output_dir", required=True, help="Path of directory to output tfrecord files.", type=str, ) @click.option( "--number_of_user_features", default=1, help="number of user specific features. Default is 1 for amazon books dataset (user_id).", type=int ) @click.option( "--max_seq_len", default=100, help="maximum possible length of history. 
(Entries will be padded to that length later).", type=int ) @click.option( "--n_proc", default=multiprocessing.cpu_count(), help="Number of processes started to speed up conversion to tfrecord.", type=int, ) @click.option( "--train_split_dir", default='train', help="Name of directory within amazon dataset directory containing train data.", type=str ) @click.option( "--test_split_dir", default='test', help="Name of directory within amazon dataset directory containing test data.", type=str, ) @click.option( "--metadata_file", default='metadata.json', help="Name of metadata file within amazon dataset directory (containing feature cardinalities).", type=str ) @click.option( "--train_output_dir", default='train', help="Name of train directory within output directory.", type=str ) @click.option( "--test_output_dir", default='test', help='Name of test directory within output directory.', type=str ) @click.option( "--train_parts", default=8, help="Number of output train files.", type=int ) @click.option( "--test_parts", default=4, help="Number of output test files.", type=int ) @click.option( "--prebatch_train_size", default=0, help='Apply batching to data in preprocessing. If prebatch_size == 0, no prebatching is done.', type=int ) @click.option( "--prebatch_test_size", default=0, help='Apply batching to data in preprocessing. If prebatch_size == 0, no prebatching is done.', type=int ) def main( amazon_dataset_path: str, tfrecord_output_dir: str, number_of_user_features: int, max_seq_len: int, n_proc: int, train_split_dir: str, test_split_dir: str, metadata_file: str, train_output_dir: str, test_output_dir: str, train_parts: int, test_parts: int, prebatch_train_size: int, prebatch_test_size: int ): """ read_parquet() create tf.train.Features create default FeatureSpec dump to Tfrecords """ amazon_dataset_path = pathlib.Path(amazon_dataset_path) tfrecord_output_dir = pathlib.Path(tfrecord_output_dir) input_splits = [ amazon_dataset_path / train_split_dir, amazon_dataset_path / test_split_dir ] output_splits = [ tfrecord_output_dir / train_output_dir, tfrecord_output_dir / test_output_dir ] for split_dir in output_splits: os.makedirs(split_dir, exist_ok=True) with open(amazon_dataset_path / metadata_file, 'r') as file: metadata = json.load(file) feature_cardinalities = [] for cardinality in metadata['cardinalities']: feature_cardinalities.append(cardinality['value']) user_features_cardinalities = feature_cardinalities[:number_of_user_features] item_features_cardinalities = feature_cardinalities[number_of_user_features:] feature_spec = FeatureSpec.get_default_feature_spec(user_features_cardinalities, item_features_cardinalities, max_seq_len) number_of_item_features = len(item_features_cardinalities) sequential_data_start = 1 + number_of_user_features + number_of_item_features all_feature_names = FeatureSpec.get_default_features_names(number_of_user_features, number_of_item_features) prebatch_per_split = [prebatch_train_size, prebatch_test_size] parts_per_split = [train_parts, test_parts] mappings = [TRAIN_MAPPING, TEST_MAPPING] for mapping, input_dir, output_dir, output_parts, prebatch_size in zip(mappings, input_splits, output_splits, parts_per_split, prebatch_per_split): prebatch = prebatch_size > 0 prepare_record_function = partial(prepare_record, all_feature_names=all_feature_names, sequential_data_start=sequential_data_start, prebatch=prebatch) save_records_function = partial(save_records, base_output_path=tfrecord_output_dir, feature_spec=feature_spec, mapping=mapping) logging.info(f"Started 
conversion, will output to {output_dir}") df = pd.read_parquet(input_dir, engine='pyarrow') logging.info("Parquet loaded") if prebatch: df['batch_index'] = df.index // prebatch_size df = df.groupby('batch_index').agg(list) if len(df.iloc[-1, 0]) < prebatch_size: remainder = df[-1:].to_dict('records')[0] remainder = prepare_record_function(remainder) df = df[:-1] logging.info("Prebatching applied") df = df.to_dict('records') with multiprocessing.Pool(n_proc) as pool: records = pool.map(prepare_record_function, df) logging.info("Records created") records = np.array_split(records, output_parts) for i, records_part in enumerate(records): if len(records_part) > 0: save_records_function(output_dir / f'part_{i}.tfrecord', records_part) if prebatch: save_records_function(output_dir / REMAINDER_FILENAME, [remainder]) feature_spec.to_yaml(tfrecord_output_dir / 'feature_spec.yaml') if __name__ == "__main__": main()
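A hedged sketch of what prepare_record produces for a single non-prebatched sample. The feature names and sample layout below are hypothetical; the real names come from FeatureSpec.get_default_features_names in the SIM codebase, and prepare_record is assumed importable from the script above.

# Hypothetical feature names and sample; illustrates serialization + round-trip only.
import tensorflow as tf

feature_names = ["label", "user_id", "item_id", "cat_id", "item_history", "cat_history"]
sample = {
    "label": 1, "user_id": 7, "item_id": 42, "cat_id": 3,
    "item_history": [42, 17, 8], "cat_history": [3, 3, 1],
}
record_bytes = prepare_record(sample, feature_names, sequential_data_start=4, prebatch=False)
example = tf.train.Example.FromString(record_bytes)
print(example.features.feature["item_history"].int64_list.value)  # [42, 17, 8]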
PyTorch/LanguageModeling/BERT/triton/dist6l/scripts/docker
docker
triton_inference_server
#!/usr/bin/env bash # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:=all} docker run --rm -d \ -p 8000:8000 \ -p 8001:8001 \ -p 8002:8002 \ --runtime=nvidia \ -e NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES} \ -e ORT_TENSORRT_FP16_ENABLE=1 \ -v ${MODEL_REPOSITORY_PATH}:${MODEL_REPOSITORY_PATH} \ --ipc=host \ --shm-size=1g \ --ulimit memlock=-1 \ --ulimit stack=67108864 \ nvcr.io/nvidia/tritonserver:21.10-py3 tritonserver \ --model-store=${MODEL_REPOSITORY_PATH} \ --strict-model-config=false \ --exit-on-error=true \ --model-control-mode=explicit
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit/perf_analyzer
perf_analyzer
exceptions
class PerfAnalyzerException(Exception): def __init__(self, message: str): self._message = message def __str__(self): """ Get the exception string representation. Returns ------- str The message associated with this exception, or None if no message. """ return self._message @property def message(self): """ Get the exception message. Returns ------- str The message associated with this exception, or None if no message. """ return self._message
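A minimal usage sketch for the exception class above; the error text is illustrative.

try:
    raise PerfAnalyzerException("perf_analyzer binary not found on PATH")
except PerfAnalyzerException as e:
    print(e.message)  # same string as str(e)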
CUDA-Optimized/FastSpeech/fastspeech/hparams
hparams
infer
# Inheritance parent_yaml: "base.yaml" # Data meta_file: "metadata_test.csv" # Infer n_workers: 16 # Num of workers used in data loader. batch_size: 1 # Batch size. log_step: 1 # Step interval in logging on command-line and Tensorboard. use_fp16: False # Usage of half precision. use_trt: False # Usage of accelerated inference using TensorRT. pyprof_enabled: False # Usage of pyprof. Enable it only for profiling use.
PyTorch/LanguageModeling/BERT/data
data
WikicorpusTextFormatting
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import os class WikicorpusTextFormatting: def __init__(self, wiki_path, output_filename, recursive = False): self.wiki_path = wiki_path self.recursive = recursive self.output_filename = output_filename # This puts one article per line def merge(self): with open(self.output_filename, mode='w', newline='\n') as ofile: for dirname in glob.glob(self.wiki_path + '/*/', recursive=False): for filename in glob.glob(dirname + 'wiki_*', recursive=self.recursive): print(filename) article_lines = [] article_open = False with open(filename, mode='r', newline='\n') as file: for line in file: if '<doc id=' in line: article_open = True elif '</doc>' in line: article_open = False for oline in article_lines[1:]: if oline != '\n': ofile.write(oline.rstrip() + " ") ofile.write("\n\n") article_lines = [] else: if article_open: article_lines.append(line)
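A hedged usage sketch for the formatter above: it merges WikiExtractor output shards (wiki_00, wiki_01, ...) into a single one-article-per-line corpus file. The paths are illustrative and the output directory is assumed to exist.

formatter = WikicorpusTextFormatting(
    wiki_path="data/extracted/wikicorpus_en",
    output_filename="data/formatted/wikicorpus_en_one_article_per_line.txt",
    recursive=True,
)
formatter.merge()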
PyTorch/Classification/GPUNet/triton/runner
runner
core
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import pathlib from enum import Enum from typing import Any, Dict, List import yaml class CustomDumper(yaml.Dumper): """ Custom YAML dumper to avoid creating aliases """ def ignore_aliases(self, data: Dict) -> bool: return True class Paths: """ Paths mapping inside Triton Container """ MODEL_REPOSITORY_PATH = "/mnt/triton-models" LIBRARIES_PATH = "/mnt/libs" class Framework(Enum): """ Supported frameworks """ TensorFlow1 = "TensorFlow1" TensorFlow2 = "TensorFlow2" PyTorch = "PyTorch" class Command: """Represents wrapper of raw string command""" def __init__(self, data: str): """ Store command data Args: data: string with bash commands to execute """ self._data = data def __str__(self) -> str: """ String object representation Returns: String """ return self._data @dataclasses.dataclass class Measurement: offline_batch_sizes: List[int] offline_concurrency: List[int] online_batch_sizes: List[int] online_concurrency: List[int] min_shapes_batch: int max_shapes_batch: int opt_shapes_batch: int class DataObject: """ Data object representation handling recursive transformation from object to dict """ READ_ONLY = set() def to_dict(self) -> Dict: """ Represent object as dictionary Returns: Dict """ data = {} filtered_data = {key: value for key, value in self.__dict__.items() if key not in self.READ_ONLY} for key, value in filtered_data.items(): data[key] = self._convert_value(value) return data def _convert_value(self, value: Any) -> Any: """ Convert value based on its type Args: value: variable to convert Returns: Converted object """ if isinstance(value, DataObject): value = value.to_dict() elif isinstance(value, dict): value = self._from_dict(value) elif isinstance(value, list): value = self._from_list(value) elif isinstance(value, Enum): value = value.value elif isinstance(value, pathlib.Path): value = value.as_posix() return value def _from_dict(self, values: Dict) -> Any: """ Convert dictionary values Args: values: dictionary with values Returns: Any """ data = {} for key, value in values.items(): data[key] = self._convert_value(value) return data def _from_list(self, values: List) -> Any: """ Convert list of values Args: values: list with values Returns: Any """ items = [] for value in values: item = self._convert_value(value) items.append(item) return items AVAILABLE_FRAMEWORKS = [f.value for f in Framework] class Batching(Enum): DISABLED = "disabled" STATIC = "static" DYNAMIC = "dynamic"
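A hedged sketch of how DataObject subclasses serialize themselves: to_dict() converts nested objects, enums, and paths recursively. The Checkpoint class below is made up for illustration; only DataObject and Framework come from the module above.

import pathlib

class Checkpoint(DataObject):
    def __init__(self, name: str, framework: Framework, path: pathlib.Path):
        self.name = name
        self.framework = framework
        self.path = path

ckpt = Checkpoint("gpunet-0", Framework.PyTorch, pathlib.Path("/mnt/checkpoints/gpunet0.pt"))
print(ckpt.to_dict())
# {'name': 'gpunet-0', 'framework': 'PyTorch', 'path': '/mnt/checkpoints/gpunet0.pt'}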
TensorFlow/Classification/ConvNets/triton/deployment_toolkit
deployment_toolkit
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
PyTorch/LanguageModeling/BART
BART
requirements
dataclasses gitpython==3.1.29 rouge-score==0.1.2 pynvml==8.0.4 tqdm==4.64.1 git+https://github.com/NVIDIA/dllogger git+https://github.com/NVIDIA/lddl.git
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/synthesizer
synthesizer
base_synthesizer
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc class BaseSynthesizer(abc.ABC): """Base class for all ``Synthesizers``""" @classmethod def get_synthesizers(cls, include_parents=True): """Recursively find subclasses of `BaseSynthesizer` Args: include_parents (bool): whether to include parents to other classes. (default: `True`) """ synthesizers = dict() for child in cls.__subclasses__(): children = child.get_synthesizers(include_parents) synthesizers.update(children) if include_parents or not children: if abc.ABC not in child.__bases__: synthesizers[child.__name__] = child return synthesizers def fit(self, *args, **kwargs): """fits synthesizer on a specified dataset""" raise NotImplementedError() def generate(self, *args, **kwargs): """generate graph using configured synthesizer""" raise NotImplementedError() def save(self, path: str): """save this synthesizer to disk Args: path (str): The path to save the synthesizer to """ raise NotImplementedError() @classmethod def load(cls, path: str): """load up a saved synthesizer object from disk. Args: path (str): The path to load the synthesizer from """ raise NotImplementedError() @staticmethod def add_args(parser): """optional function to add arguments to parser for the CLI interface""" return parser
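A hedged sketch of the registration pattern above: any concrete subclass is discovered by get_synthesizers() under its class name. The subclass and its behaviour below are illustrative only, not part of the SynGen codebase.

class RandomEdgeSynthesizer(BaseSynthesizer):
    def fit(self, graph, **kwargs):
        self.num_edges = len(graph)  # placeholder "training"
        return self

    def generate(self, num_edges=None, **kwargs):
        return [(i, i + 1) for i in range(num_edges or self.num_edges)]

print(BaseSynthesizer.get_synthesizers())
# {'RandomEdgeSynthesizer': <class '...RandomEdgeSynthesizer'>}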
PyTorch/Translation/GNMT
GNMT
.gitignore
__pycache__ tags *.log /results /data .DS_Store .rsyncignore
PyTorch/Classification/ConvNets/se-resnext101-32x4d/training/TF32
TF32
DGXA100_se-resnext101-32x4d_TF32_250E
python ./multiproc.py --nproc_per_node 8 ./launch.py --model se-resnext101-32x4d --precision TF32 --mode convergence --platform DGXA100 /imagenet --workspace ${1:-./} --raport-file raport.json
PyTorch/Detection/Efficientdet/effdet/config
config
__init__
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .model_config import get_efficientdet_config, get_fpn_config, default_detection_model_configs, get_backbone_config from .train_config import default_detection_train_config
TensorFlow/LanguageModeling/BERT/triton/scripts
scripts
run_triton_tf
#!/bin/bash # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. init_checkpoint=${1:-"data/download/nvidia_pretrained/bert_tf_squad11_large_384/model.ckpt"} batch_size=${2:-"8"} precision=${3:-"fp16"} use_xla=${4:-"true"} seq_length=${5:-"384"} doc_stride=${6:-"128"} bert_model=${7:-"large"} squad_version=${8:-"1.1"} triton_version_name=${9:-1} triton_model_name=${10:-"bert"} triton_export_model=${11:-"true"} triton_dyn_batching_delay=${12:-0} triton_engine_count=${13:-1} triton_model_overwrite=${14:-"False"} if [ "$bert_model" = "large" ] ; then export BERT_DIR=data/download/nvidia_pretrained/bert_tf_pretraining_large_lamb else export BERT_DIR=data/download/nvidia_pretrained/bert_tf_squad11_base_128 fi export SQUAD_DIR=data/download/squad/v${squad_version} if [ "$squad_version" = "1.1" ] ; then version_2_with_negative="False" else version_2_with_negative="True" fi # Need to ignore case on some variables triton_export_model=$(echo "$triton_export_model" | tr '[:upper:]' '[:lower:]') # Explicitly save this variable to pass down to new containers NV_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:-"all"} echo " BERT directory set as " $BERT_DIR echo echo "Argument: " echo " init_checkpoint = $init_checkpoint" echo " Using TRT engine= $use_trt_model" echo " batch_size = $batch_size" echo " precision = $precision" echo " use_xla = $use_xla" echo " seq_length = $seq_length" echo " doc_stride = $doc_stride" echo " bert_model = $bert_model" echo " squad_version = $squad_version" echo " version_name = $triton_version_name" echo " model_name = $triton_model_name" echo " export_model = $triton_export_model" echo echo "Env: " echo " NVIDIA_VISIBLE_DEVICES = $NV_VISIBLE_DEVICES" echo # Export Model in SavedModel format if enabled if [ "$triton_export_model" = "true" ] ; then echo "Exporting model as: Name - $triton_model_name Version - $triton_version_name" bash triton/scripts/export_model.sh $init_checkpoint $batch_size $precision $use_xla $seq_length \ $doc_stride $BERT_DIR $triton_version_name $triton_model_name \ $triton_dyn_batching_delay $triton_engine_count $triton_model_overwrite fi # Start TRTIS server in detached state bash triton/scripts/launch_server.sh # Wait until server is up. curl on the health of the server and sleep until its ready bash triton/scripts/wait_for_triton_server.sh localhost # Start TRTIS client for inference on SQuAD Dataset bash triton/scripts/run_client.sh $batch_size $seq_length $doc_stride $triton_version_name $triton_model_name \ $BERT_DIR --version_2_with_negative=${version_2_with_negative} --predict_file=$SQUAD_DIR/dev-v${squad_version}.json # Evaluate SQuAD results bash scripts/docker/launch.sh "python $SQUAD_DIR/evaluate-v${squad_version}.py \ $SQUAD_DIR/dev-v${squad_version}.json /results/predictions.json" #Kill the TRTIS Server docker kill triton_server_cont
PyTorch/LanguageModeling/BERT
BERT
inference
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BERT inference script. Does not depend on dataset. """ from __future__ import absolute_import, division, print_function import argparse import collections import json import logging import math import os import random import sys from io import open import numpy as np import torch from tqdm import tqdm, trange from types import SimpleNamespace from file_utils import PYTORCH_PRETRAINED_BERT_CACHE from modeling import BertForQuestionAnswering, BertConfig, WEIGHTS_NAME, CONFIG_NAME from tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize) from run_squad import _get_best_indices, _compute_softmax, get_valid_prelim_predictions, get_answer_text if sys.version_info[0] == 2: import cPickle as pickle else: import pickle logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) import math import json import numpy as np import collections def preprocess_tokenized_text(doc_tokens, query_tokens, tokenizer, max_seq_length, max_query_length): """ converts an example into a feature """ if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # truncate if too long length = len(all_doc_tokens) length = min(length, max_tokens_for_doc) tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for i in range(length): token_to_orig_map[len(tokens)] = tok_to_orig_index[i] token_is_max_context[len(tokens)] = True tokens.append(all_doc_tokens[i]) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length tensors_for_inference = { 'input_ids': input_ids, 'input_mask': input_mask, 'segment_ids': segment_ids } tensors_for_inference = SimpleNamespace(**tensors_for_inference) tokens_for_postprocessing = { 'tokens': tokens, 'token_to_orig_map': token_to_orig_map, 'token_is_max_context': token_is_max_context } tokens_for_postprocessing = SimpleNamespace(**tokens_for_postprocessing) return tensors_for_inference, tokens_for_postprocessing RawResult = collections.namedtuple("RawResult", ["start_logits", "end_logits"]) def get_answer(doc_tokens, tokens_for_postprocessing, start_logits, end_logits, args): result = RawResult(start_logits=start_logits, end_logits=end_logits) predictions = [] Prediction = collections.namedtuple('Prediction', ['text', 'start_logit', 'end_logit']) if args.version_2_with_negative: null_val = (float("inf"), 0, 0) start_indices = _get_best_indices(result.start_logits, args.n_best_size) end_indices = _get_best_indices(result.end_logits, args.n_best_size) prelim_predictions = get_valid_prelim_predictions(start_indices, end_indices, tokens_for_postprocessing, result, args) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True ) if args.version_2_with_negative: score = result.start_logits[0] + result.end_logits[0] if score < null_val[0]: null_val = (score, result.start_logits[0], result.end_logits[0]) doc_tokens_obj = { 'doc_tokens': doc_tokens, } doc_tokens_obj = SimpleNamespace(**doc_tokens_obj) curr_predictions = [] seen_predictions = [] for pred in prelim_predictions: if len(curr_predictions) == args.n_best_size: break if pred.end_index > 0: # this is a non-null prediction final_text = get_answer_text(doc_tokens_obj, tokens_for_postprocessing, pred, args) if final_text in seen_predictions: continue else: final_text = "" seen_predictions.append(final_text) curr_predictions.append(Prediction(final_text, pred.start_logit, pred.end_logit)) predictions += curr_predictions # add empty prediction if args.version_2_with_negative: predictions.append(Prediction('', null_val[1], null_val[2])) nbest_answers = [] answer = None nbest = sorted(predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)[:args.n_best_size] total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry and entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_answers.append(output) if args.version_2_with_negative: score_diff = null_val[0] - best_non_null_entry.start_logit - best_non_null_entry.end_logit if score_diff > args.null_score_diff_threshold: answer = "" else: answer = best_non_null_entry.text else: answer = nbest_answers[0]['text'] return answer, nbest_answers def main(): parser = argparse.ArgumentParser() ## Required parameters parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, " "bert-base-multilingual-cased, 
bert-base-chinese.") parser.add_argument("--init_checkpoint", default=None, type=str, required=True, help="The checkpoint file from pretraining") ## Other parameters parser.add_argument("--verbose_logging", action='store_true', help="If true, all of the warnings related to data processing will be printed. ") parser.add_argument("--seed", default=1, type=int) parser.add_argument("--question", default="Most antibiotics target bacteria and don't affect what class of organisms? ", type=str, help="question") parser.add_argument("--context", default="Within the genitourinary and gastrointestinal tracts, commensal flora serve as biological barriers by competing with pathogenic bacteria for food and space and, in some cases, by changing the conditions in their environment, such as pH or available iron. This reduces the probability that pathogens will reach sufficient numbers to cause illness. However, since most antibiotics non-specifically target bacteria and do not affect fungi, oral antibiotics can lead to an overgrowth of fungi and cause conditions such as a vaginal candidiasis (a yeast infection). There is good evidence that re-introduction of probiotic flora, such as pure cultures of the lactobacilli normally found in unpasteurized yogurt, helps restore a healthy balance of microbial populations in intestinal infections in children and encouraging preliminary data in studies on bacterial gastroenteritis, inflammatory bowel diseases, urinary tract infection and post-surgical infections. ", type=str, help="context") parser.add_argument("--max_seq_length", default=384, type=int, help="The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded.") parser.add_argument("--max_query_length", default=64, type=int, help="The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length.") parser.add_argument("--n_best_size", default=1, type=int, help="The total number of n-best predictions to generate. ") parser.add_argument("--max_answer_length", default=30, type=int, help="The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--do_lower_case", action='store_true', help="Whether to lower case the input text. True for uncased models, False for cased models.") parser.add_argument('--version_2_with_negative', action='store_true', help='If true, then the model can reply with "unknown". ') parser.add_argument('--null_score_diff_threshold', type=float, default=-11.0, help="If null_score - best_non_null is greater than the threshold predict 'unknown'. 
") parser.add_argument('--vocab_file', type=str, default=None, required=True, help="Vocabulary mapping/file BERT was pretrainined on") parser.add_argument("--config_file", default=None, type=str, required=True, help="The BERT model config") parser.add_argument('--fp16', action='store_true', help="use mixed-precision") parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', -1), help="local_rank for distributed training on gpus") args = parser.parse_args() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed(args.seed) if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) tokenizer = BertTokenizer(args.vocab_file, do_lower_case=args.do_lower_case, max_len=512) # for bert large # Prepare model config = BertConfig.from_json_file(args.config_file) # Padding for divisibility by 8 if config.vocab_size % 8 != 0: config.vocab_size += 8 - (config.vocab_size % 8) # initialize model model = BertForQuestionAnswering(config) model.load_state_dict(torch.load(args.init_checkpoint, map_location='cpu')["model"]) model.to(device) if args.fp16: model.half() model.eval() print("question: ", args.question) print("context: ", args.context) print() # preprocessing doc_tokens = args.context.split() query_tokens = tokenizer.tokenize(args.question) feature = preprocess_tokenized_text(doc_tokens, query_tokens, tokenizer, max_seq_length=args.max_seq_length, max_query_length=args.max_query_length) tensors_for_inference, tokens_for_postprocessing = feature input_ids = torch.tensor(tensors_for_inference.input_ids, dtype=torch.long).unsqueeze(0) segment_ids = torch.tensor(tensors_for_inference.segment_ids, dtype=torch.long).unsqueeze(0) input_mask = torch.tensor(tensors_for_inference.input_mask, dtype=torch.long).unsqueeze(0) # load tensors to device input_ids = input_ids.to(device) input_mask = input_mask.to(device) segment_ids = segment_ids.to(device) # run prediction with torch.no_grad(): start_logits, end_logits = model(input_ids, segment_ids, input_mask) # post-processing start_logits = start_logits[0].detach().cpu().tolist() end_logits = end_logits[0].detach().cpu().tolist() answer, answers = get_answer(doc_tokens, tokens_for_postprocessing, start_logits, end_logits, args) # print result print() print(answer) print() print(json.dumps(answers, indent=4)) if __name__ == "__main__": main()
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/inference
inference
inference_FP32
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python main.py \ --cfg config/efficientnet_v2/s_cfg.py \ --mode predict \ --use_xla \ --predict_batch_size 128 \ --predict_img_size 384 \ --predict_ckpt xxx \
PyTorch/Forecasting/TFT/triton/runner
runner
experiment
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import pathlib from datetime import datetime from typing import Any, Dict, Optional # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .core import DataObject class ExperimentStatus(object): """ Experiment status flags object """ SUCCEED = "Succeed" FAILED = "Failed" class StageStatus: """ Stages status flags object """ SUCCEED = "Succeed" FAILED = "Failed" class Stage(DataObject): """ Stage data object """ name: str status: str started_at: Optional[int] ended_at: Optional[int] result_path: Optional[str] result_type: Optional[str] def __init__( self, name: str, result_path: Optional[str], result_type: Optional[str], status: str = StageStatus.FAILED, started_at: Optional[int] = None, ended_at: Optional[int] = None, ): """ Args: name: name of stage result_path: path where results file is stored result_type: type of results status: success/fail status started_at: time when stage has started ended_at: time when stage has ended """ self.name = name self.status = status self.started_at = started_at self.ended_at = ended_at self.result_path = result_path self.result_type = result_type def start(self) -> None: """ Update stage execution info at start Returns: None """ self.started_at = int(datetime.utcnow().timestamp()) def end(self) -> None: """ Update stage execution info at end Returns: None """ self.status = StageStatus.SUCCEED self.ended_at = int(datetime.utcnow().timestamp()) class Experiment(DataObject): """ Experiment data object """ experiment_id: int parameters: Dict stages: Dict[str, Stage] results: Dict[str, str] status: str started_at: Optional[int] ended_at: Optional[int] def __init__( self, experiment_id: int, parameters: Dict, stages: Dict[str, Stage], results: Dict[str, str], started_at: Optional[int] = None, ended_at: Optional[int] = None, status: str = ExperimentStatus.FAILED, ): """ Args: experiment_id: experiment identifier parameters: dictionary with experiment configuration stages: dictionary with stages run in experiment results: mapping between results types and location where are stored started_at: time when experiment has started ended_at: time when experiment has ended status: experiment success/fail information """ self.experiment_id = experiment_id self.started_at = started_at self.ended_at = ended_at self.parameters = parameters self.stages = stages self.status = status self.results = results self.results_dir = f"experiment_{experiment_id}" def start(self) -> None: """ Update experiment execution info at start Returns: None """ self.started_at = int(datetime.utcnow().timestamp()) def end(self) -> None: """ Update experiment execution info at end Returns: None """ self.status = ExperimentStatus.SUCCEED self.ended_at = int(datetime.utcnow().timestamp()) @dataclasses.dataclass class Status: state: ExperimentStatus message: str 
@dataclasses.dataclass class ExperimentResult: """ Experiment result object """ status: Status experiment: Experiment results: Dict[str, pathlib.Path] payload: Dict[str, Any] = dataclasses.field(default_factory=dict)
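A hedged sketch of how the objects above record a run: a Stage and an Experiment are created, started, and ended, and to_dict() (inherited from DataObject) serializes the result. Stage names, parameters, and paths are illustrative.

triton_stage = Stage(name="triton_performance_offline",
                     result_path="triton_performance_offline.csv",
                     result_type="triton_performance")
experiment = Experiment(experiment_id=1,
                        parameters={"precision": "fp16", "batch_size": 64},
                        stages={triton_stage.name: triton_stage},
                        results={})
experiment.start()
triton_stage.start()
# ... run the stage here ...
triton_stage.end()
experiment.end()
print(experiment.to_dict()["status"])  # "Succeed"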
PyTorch/Classification/ConvNets/image_classification
image_classification
optimizers
import math import numpy as np import torch from torch import optim def get_optimizer(parameters, lr, args, state=None): if args.optimizer == "sgd": optimizer = get_sgd_optimizer( parameters, lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=args.nesterov, bn_weight_decay=args.bn_weight_decay, ) elif args.optimizer == "rmsprop": optimizer = get_rmsprop_optimizer( parameters, lr, alpha=args.rmsprop_alpha, momentum=args.momentum, weight_decay=args.weight_decay, eps=args.rmsprop_eps, bn_weight_decay=args.bn_weight_decay, ) if not state is None: optimizer.load_state_dict(state) return optimizer def get_sgd_optimizer( parameters, lr, momentum, weight_decay, nesterov=False, bn_weight_decay=False ): if bn_weight_decay: print(" ! Weight decay applied to BN parameters ") params = [v for n, v in parameters] else: print(" ! Weight decay NOT applied to BN parameters ") bn_params = [v for n, v in parameters if "bn" in n] rest_params = [v for n, v in parameters if not "bn" in n] print(len(bn_params)) print(len(rest_params)) params = [ {"params": bn_params, "weight_decay": 0}, {"params": rest_params, "weight_decay": weight_decay}, ] optimizer = torch.optim.SGD( params, lr, momentum=momentum, weight_decay=weight_decay, nesterov=nesterov ) return optimizer def get_rmsprop_optimizer( parameters, lr, alpha, weight_decay, momentum, eps, bn_weight_decay=False ): bn_params = [v for n, v in parameters if "bn" in n] rest_params = [v for n, v in parameters if not "bn" in n] params = [ {"params": bn_params, "weight_decay": weight_decay if bn_weight_decay else 0}, {"params": rest_params, "weight_decay": weight_decay}, ] optimizer = torch.optim.RMSprop( params, lr=lr, alpha=alpha, weight_decay=weight_decay, momentum=momentum, eps=eps, ) return optimizer def lr_policy(lr_fn): def _alr(optimizer, iteration, epoch): lr = lr_fn(iteration, epoch) for param_group in optimizer.param_groups: param_group["lr"] = lr return lr return _alr def lr_step_policy(base_lr, steps, decay_factor, warmup_length): def _lr_fn(iteration, epoch): if epoch < warmup_length: lr = base_lr * (epoch + 1) / warmup_length else: lr = base_lr for s in steps: if epoch >= s: lr *= decay_factor return lr return lr_policy(_lr_fn) def lr_linear_policy(base_lr, warmup_length, epochs): def _lr_fn(iteration, epoch): if epoch < warmup_length: lr = base_lr * (epoch + 1) / warmup_length else: e = epoch - warmup_length es = epochs - warmup_length lr = base_lr * (1 - (e / es)) return lr return lr_policy(_lr_fn) def lr_cosine_policy(base_lr, warmup_length, epochs, end_lr=0): def _lr_fn(iteration, epoch): if epoch < warmup_length: lr = base_lr * (epoch + 1) / warmup_length else: e = epoch - warmup_length es = epochs - warmup_length lr = end_lr + (0.5 * (1 + np.cos(np.pi * e / es)) * (base_lr - end_lr)) return lr return lr_policy(_lr_fn) def lr_exponential_policy( base_lr, warmup_length, epochs, final_multiplier=0.001, decay_factor=None, decay_step=1, logger=None, ): """Exponential lr policy. Setting decay factor parameter overrides final_multiplier""" es = epochs - warmup_length if decay_factor is not None: epoch_decay = decay_factor else: epoch_decay = np.power( 2, np.log2(final_multiplier) / math.floor(es / decay_step) ) def _lr_fn(iteration, epoch): if epoch < warmup_length: lr = base_lr * (epoch + 1) / warmup_length else: e = epoch - warmup_length lr = base_lr * (epoch_decay ** math.floor(e / decay_step)) return lr return lr_policy(_lr_fn)
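A hedged sketch pairing get_sgd_optimizer with the cosine schedule above; the toy model and hyper-parameters are illustrative, and the helpers are assumed to be in scope from the module above.

import torch

model = torch.nn.Linear(16, 4)
optimizer = get_sgd_optimizer(list(model.named_parameters()), lr=0.1,
                              momentum=0.9, weight_decay=1e-4, nesterov=True)
adjust_lr = lr_cosine_policy(base_lr=0.1, warmup_length=5, epochs=90)

for epoch in range(90):
    current_lr = adjust_lr(optimizer, iteration=0, epoch=epoch)  # linear warmup, then cosine decay
    # ... run one training epoch with `optimizer` here ...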
PyTorch/Translation/GNMT/seq2seq/data
data
dataset
# Copyright (c) 2017 Elad Hoffer # Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import logging from operator import itemgetter import torch from torch.utils.data import DataLoader from torch.utils.data import Dataset import seq2seq.data.config as config from seq2seq.data.sampler import BucketingSampler from seq2seq.data.sampler import DistributedSampler from seq2seq.data.sampler import ShardingSampler from seq2seq.data.sampler import StaticDistributedSampler def build_collate_fn(batch_first=False, parallel=True, sort=False): """ Factory for collate_fn functions. :param batch_first: if True returns batches in (batch, seq) format, if False returns in (seq, batch) format :param parallel: if True builds batches from parallel corpus (src, tgt) :param sort: if True sorts by src sequence length within each batch """ def collate_seq(seq): """ Builds batches for training or inference. Batches are returned as pytorch tensors, with padding. :param seq: list of sequences """ lengths = torch.tensor([len(s) for s in seq], dtype=torch.int64) batch_length = max(lengths) shape = (len(seq), batch_length) seq_tensor = torch.full(shape, config.PAD, dtype=torch.int64) for i, s in enumerate(seq): end_seq = lengths[i] seq_tensor[i, :end_seq].copy_(s[:end_seq]) if not batch_first: seq_tensor = seq_tensor.t() return (seq_tensor, lengths) def parallel_collate(seqs): """ Builds batches from parallel dataset (src, tgt), optionally sorts batch by src sequence length. :param seqs: tuple of (src, tgt) sequences """ src_seqs, tgt_seqs = zip(*seqs) if sort: indices, src_seqs = zip(*sorted(enumerate(src_seqs), key=lambda item: len(item[1]), reverse=True)) tgt_seqs = [tgt_seqs[idx] for idx in indices] return tuple([collate_seq(s) for s in [src_seqs, tgt_seqs]]) def single_collate(src_seqs): """ Builds batches from text dataset, optionally sorts batch by src sequence length. 
:param src_seqs: source sequences """ if sort: indices, src_seqs = zip(*sorted(enumerate(src_seqs), key=lambda item: len(item[1]), reverse=True)) else: indices = range(len(src_seqs)) return collate_seq(src_seqs), tuple(indices) if parallel: return parallel_collate else: return single_collate class SyntheticDataset(Dataset): def __init__(self, vocab_size, seq_len, nsamples): self.vocab_size = vocab_size self.nsamples = nsamples self.seq_len = seq_len def __getitem__(self, idx): rand = torch.randint(0, self.vocab_size, size=(self.seq_len,)) return rand def unsort(self, array): return array def get_loader(self, batch_size=1, num_workers=0, batch_first=False, pad=False, repeat=1): collate_fn = build_collate_fn(batch_first, parallel=False, sort=True) sampler = StaticDistributedSampler(self, batch_size, pad, repeat) return DataLoader(self, batch_size=batch_size, collate_fn=collate_fn, sampler=sampler, num_workers=num_workers, pin_memory=True, drop_last=False) def __len__(self): return self.nsamples class RawTextDataset(Dataset): def __init__(self, raw_data=None, raw_datafile=None, tokenizer=None, sort=False, max_size=None): self.tokenizer = tokenizer self.sorted = False if raw_datafile: with open(raw_datafile, 'r') as f: self.raw_data = f.readlines() else: self.raw_data = raw_data if max_size: self.raw_data = self.raw_data[:max_size] self.lengths = [len(s.split()) for s in self.raw_data] if sort: self.sort_by_length() def __getitem__(self, idx): raw = self.raw_data[idx] tokenized = self.tokenizer.tokenize(raw) return tokenized def unsort(self, array): """ "Unsorts" given array (restores original order of elements before dataset was sorted by sequence length). :param array: array to be "unsorted" """ if self.sorted: inverse = sorted(enumerate(self.indices), key=itemgetter(1)) array = [array[i[0]] for i in inverse] return array def sort_by_length(self): output = sorted( enumerate(self.raw_data), key=lambda x: len(x[1].split()), ) self.indices, self.raw_data = zip(*output) self.lengths = [self.lengths[idx] for idx in self.indices] self.sorted = True def __len__(self): return len(self.raw_data) def get_loader(self, batch_size=1, num_workers=0, batch_first=False, pad=False, repeat=1): collate_fn = build_collate_fn(batch_first, parallel=False, sort=True) sampler = StaticDistributedSampler(self, batch_size, pad, repeat) return DataLoader(self, batch_size=batch_size, collate_fn=collate_fn, sampler=sampler, num_workers=num_workers, pin_memory=True, drop_last=False) class TextDataset(Dataset): def __init__(self, src_fname, tokenizer, min_len=None, max_len=None, sort=False, max_size=None): """ Constructor for the TextDataset. Builds monolingual dataset. :param src_fname: path to the file with data :param tokenizer: tokenizer :param min_len: minimum sequence length :param max_len: maximum sequence length :param sort: sorts dataset by sequence length :param max_size: loads at most 'max_size' samples from the input file, if None loads the entire dataset """ self.min_len = min_len self.max_len = max_len self.parallel = False self.sorted = False self.src = self.process_data(src_fname, tokenizer, max_size) if min_len is not None and max_len is not None: self.filter_data(min_len, max_len) lengths = [len(s) for s in self.src] self.lengths = torch.tensor(lengths) if sort: self.sort_by_length() def sort_by_length(self): """ Sorts dataset by the sequence length. 
""" self.lengths, indices = self.lengths.sort(descending=True) self.src = [self.src[idx] for idx in indices] self.indices = indices.tolist() self.sorted = True def unsort(self, array): """ "Unsorts" given array (restores original order of elements before dataset was sorted by sequence length). :param array: array to be "unsorted" """ if self.sorted: inverse = sorted(enumerate(self.indices), key=itemgetter(1)) array = [array[i[0]] for i in inverse] return array def filter_data(self, min_len, max_len): """ Preserves only samples which satisfy the following inequality: min_len <= sample sequence length <= max_len :param min_len: minimum sequence length :param max_len: maximum sequence length """ logging.info(f'Filtering data, min len: {min_len}, max len: {max_len}') initial_len = len(self.src) filtered_src = [] for src in self.src: if min_len <= len(src) <= max_len: filtered_src.append(src) self.src = filtered_src filtered_len = len(self.src) logging.info(f'Pairs before: {initial_len}, after: {filtered_len}') def process_data(self, fname, tokenizer, max_size): """ Loads data from the input file. :param fname: input file name :param tokenizer: tokenizer :param max_size: loads at most 'max_size' samples from the input file, if None loads the entire dataset """ logging.info(f'Processing data from {fname}') data = [] with open(fname) as dfile: for idx, line in enumerate(dfile): if max_size and idx == max_size: break entry = tokenizer.segment(line) entry = torch.tensor(entry) data.append(entry) return data def __len__(self): return len(self.src) def __getitem__(self, idx): return self.src[idx] def get_loader(self, batch_size=1, seeds=None, shuffle=False, num_workers=0, batch_first=False, pad=False, batching=None, batching_opt={}): collate_fn = build_collate_fn(batch_first, parallel=self.parallel, sort=True) if shuffle: if batching == 'random': sampler = DistributedSampler(self, batch_size, seeds) elif batching == 'sharding': sampler = ShardingSampler(self, batch_size, seeds, batching_opt['shard_size']) elif batching == 'bucketing': sampler = BucketingSampler(self, batch_size, seeds, batching_opt['num_buckets']) else: raise NotImplementedError else: sampler = StaticDistributedSampler(self, batch_size, pad) return DataLoader(self, batch_size=batch_size, collate_fn=collate_fn, sampler=sampler, num_workers=num_workers, pin_memory=True, drop_last=False) class ParallelDataset(TextDataset): def __init__(self, src_fname, tgt_fname, tokenizer, min_len, max_len, sort=False, max_size=None): """ Constructor for the ParallelDataset. Tokenization is done when the data is loaded from the disk. 
        :param src_fname: path to the file with src language data
        :param tgt_fname: path to the file with tgt language data
        :param tokenizer: tokenizer
        :param min_len: minimum sequence length
        :param max_len: maximum sequence length
        :param sort: sorts dataset by sequence length
        :param max_size: loads at most 'max_size' samples from the input file,
            if None loads the entire dataset
        """
        self.min_len = min_len
        self.max_len = max_len
        self.parallel = True
        self.sorted = False

        self.src = self.process_data(src_fname, tokenizer, max_size)
        self.tgt = self.process_data(tgt_fname, tokenizer, max_size)
        assert len(self.src) == len(self.tgt)

        self.filter_data(min_len, max_len)
        assert len(self.src) == len(self.tgt)

        src_lengths = [len(s) for s in self.src]
        tgt_lengths = [len(t) for t in self.tgt]
        self.src_lengths = torch.tensor(src_lengths)
        self.tgt_lengths = torch.tensor(tgt_lengths)
        self.lengths = self.src_lengths + self.tgt_lengths

        if sort:
            self.sort_by_length()

    def sort_by_length(self):
        """
        Sorts dataset by the sequence length.
        """
        self.lengths, indices = self.lengths.sort(descending=True)

        self.src = [self.src[idx] for idx in indices]
        self.tgt = [self.tgt[idx] for idx in indices]
        self.src_lengths = [self.src_lengths[idx] for idx in indices]
        self.tgt_lengths = [self.tgt_lengths[idx] for idx in indices]
        self.indices = indices.tolist()
        self.sorted = True

    def filter_data(self, min_len, max_len):
        """
        Preserves only samples which satisfy the following inequality:
            min_len <= src sample sequence length <= max_len AND
            min_len <= tgt sample sequence length <= max_len

        :param min_len: minimum sequence length
        :param max_len: maximum sequence length
        """
        logging.info(f'Filtering data, min len: {min_len}, max len: {max_len}')

        initial_len = len(self.src)
        filtered_src = []
        filtered_tgt = []
        for src, tgt in zip(self.src, self.tgt):
            if min_len <= len(src) <= max_len and \
                    min_len <= len(tgt) <= max_len:
                filtered_src.append(src)
                filtered_tgt.append(tgt)

        self.src = filtered_src
        self.tgt = filtered_tgt
        filtered_len = len(self.src)
        logging.info(f'Pairs before: {initial_len}, after: {filtered_len}')

    def __getitem__(self, idx):
        return self.src[idx], self.tgt[idx]


class LazyParallelDataset(TextDataset):
    def __init__(self, src_fname, tgt_fname, tokenizer,
                 min_len, max_len, sort=False, max_size=None):
        """
        Constructor for the LazyParallelDataset.
        Tokenization is done on the fly.
        :param src_fname: path to the file with src language data
        :param tgt_fname: path to the file with tgt language data
        :param tokenizer: tokenizer
        :param min_len: minimum sequence length
        :param max_len: maximum sequence length
        :param sort: sorts dataset by sequence length
        :param max_size: loads at most 'max_size' samples from the input file,
            if None loads the entire dataset
        """
        self.min_len = min_len
        self.max_len = max_len
        self.parallel = True
        self.sorted = False
        self.tokenizer = tokenizer

        self.raw_src = self.process_raw_data(src_fname, max_size)
        self.raw_tgt = self.process_raw_data(tgt_fname, max_size)
        assert len(self.raw_src) == len(self.raw_tgt)

        logging.info(f'Filtering data, min len: {min_len}, max len: {max_len}')
        # Subtracting 2 because EOS and BOS are added later during tokenization
        self.filter_raw_data(min_len - 2, max_len - 2)
        assert len(self.raw_src) == len(self.raw_tgt)

        # Adding 2 because EOS and BOS are added later during tokenization
        src_lengths = [i + 2 for i in self.src_len]
        tgt_lengths = [i + 2 for i in self.tgt_len]
        self.src_lengths = torch.tensor(src_lengths)
        self.tgt_lengths = torch.tensor(tgt_lengths)
        self.lengths = self.src_lengths + self.tgt_lengths

    def process_raw_data(self, fname, max_size):
        """
        Loads data from the input file.

        :param fname: input file name
        :param max_size: loads at most 'max_size' samples from the input file,
            if None loads the entire dataset
        """
        logging.info(f'Processing data from {fname}')
        data = []
        with open(fname) as dfile:
            for idx, line in enumerate(dfile):
                if max_size and idx == max_size:
                    break
                data.append(line)
        return data

    def filter_raw_data(self, min_len, max_len):
        """
        Preserves only samples which satisfy the following inequality:
            min_len <= src sample sequence length <= max_len AND
            min_len <= tgt sample sequence length <= max_len

        :param min_len: minimum sequence length
        :param max_len: maximum sequence length
        """
        initial_len = len(self.raw_src)
        filtered_src = []
        filtered_tgt = []
        filtered_src_len = []
        filtered_tgt_len = []
        for src, tgt in zip(self.raw_src, self.raw_tgt):
            src_len = src.count(' ') + 1
            tgt_len = tgt.count(' ') + 1
            if min_len <= src_len <= max_len and \
                    min_len <= tgt_len <= max_len:
                filtered_src.append(src)
                filtered_tgt.append(tgt)
                filtered_src_len.append(src_len)
                filtered_tgt_len.append(tgt_len)

        self.raw_src = filtered_src
        self.raw_tgt = filtered_tgt
        self.src_len = filtered_src_len
        self.tgt_len = filtered_tgt_len
        filtered_len = len(self.raw_src)
        logging.info(f'Pairs before: {initial_len}, after: {filtered_len}')

    def __getitem__(self, idx):
        src = torch.tensor(self.tokenizer.segment(self.raw_src[idx]))
        tgt = torch.tensor(self.tokenizer.segment(self.raw_tgt[idx]))
        return src, tgt

    def __len__(self):
        return len(self.raw_src)
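To illustrate how these dataset classes fit together, here is a minimal usage sketch. The file paths, the `tokenizer` object (anything exposing a `segment(line)` method that returns token ids) and the batching options are illustrative assumptions, not part of this module; batch unpacking follows the pair-of-(padded tensor, lengths) layout produced by the parallel collate function above.

```python
# Illustrative sketch only: paths, tokenizer and batching options are assumed.
dataset = LazyParallelDataset(
    src_fname='data/train.tok.bpe.en',   # hypothetical path
    tgt_fname='data/train.tok.bpe.de',   # hypothetical path
    tokenizer=tokenizer,                 # must provide segment(line) -> token ids
    min_len=0,
    max_len=50,
    sort=False)

loader = dataset.get_loader(
    batch_size=128,
    seeds=[0],                           # one seed per epoch
    shuffle=True,
    num_workers=2,
    batching='bucketing',
    batching_opt={'num_buckets': 5})

for (src, src_len), (tgt, tgt_len) in loader:
    # src/tgt are padded LongTensors; src_len/tgt_len hold the true lengths
    break
```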
PyTorch/SpeechSynthesis/FastPitch/triton
triton
dataloader
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from os.path import abspath, dirname sys.path.append(abspath(dirname(__file__)+'/../')) from fastpitch.data_function import TTSCollate, TTSDataset from torch.utils.data import DataLoader import numpy as np import inspect import torch from typing import List from common.text import cmudict def get_dataloader_fn(batch_size: int = 8, precision: str = "fp16", heteronyms_path: str = 'cmudict/heteronyms', cmudict_path: str = 'cmudict/cmudict-0.7b', dataset_path: str = './LJSpeech_1.1', filelist: str ="filelists/ljs_audio_pitch_text_test.txt", text_cleaners: List = ['english_cleaners_v2'], n_mel_channels: int = 80, symbol_set: str ='english_basic', p_arpabet: float = 1.0, n_speakers: int = 1, load_mel_from_disk: bool = False, load_pitch_from_disk: bool = True, pitch_mean: float = 214.72203, # LJSpeech defaults pitch_std: float = 65.72038, max_wav_value: float = 32768.0, sampling_rate: int = 22050, filter_length: int = 1024, hop_length: int = 256, win_length: int = 1024, mel_fmin: float = 0.0, mel_fmax: float = 8000.0): if p_arpabet > 0.0: cmudict.initialize(cmudict_path, heteronyms_path) dataset = TTSDataset(dataset_path=dataset_path, audiopaths_and_text=filelist, text_cleaners=text_cleaners, n_mel_channels=n_mel_channels, symbol_set=symbol_set, p_arpabet=p_arpabet, n_speakers=n_speakers, load_mel_from_disk=load_mel_from_disk, load_pitch_from_disk=load_pitch_from_disk, pitch_mean=pitch_mean, pitch_std=pitch_std, max_wav_value=max_wav_value, sampling_rate=sampling_rate, filter_length=filter_length, hop_length=hop_length, win_length=win_length, mel_fmin=mel_fmin, mel_fmax=mel_fmax) collate_fn = TTSCollate() dataloader = DataLoader(dataset, num_workers=8, shuffle=False, sampler=None, batch_size=batch_size, pin_memory=False, collate_fn=collate_fn) def _get_dataloader(): for idx, batch in enumerate(dataloader): text_padded, _, mel_padded, output_lengths, _, \ pitch_padded, energy_padded, *_ = batch pitch_padded = pitch_padded.float() energy_padded = energy_padded.float() dur_padded = torch.zeros_like(pitch_padded) if precision == "fp16": pitch_padded = pitch_padded.half() dur_padded = dur_padded.half() mel_padded = mel_padded.half() energy_padded = energy_padded.half() ids = np.arange(idx*batch_size, idx*batch_size + batch_size) x = {"INPUT__0": text_padded.cpu().numpy()} y_real = {"OUTPUT__0": mel_padded.cpu().numpy(), "OUTPUT__1": output_lengths.cpu().numpy(), "OUTPUT__2": dur_padded.cpu().numpy(), "OUTPUT__3": pitch_padded.cpu().numpy(), "OUTPUT__4": energy_padded.cpu().numpy()} yield (ids, x, y_real) return _get_dataloader
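A short sketch of how this factory might be consumed, in the spirit of the Triton evaluation scripts that iterate such dataloader functions; the dataset location and batch size below are illustrative assumptions.

```python
# Illustrative only: dataset location and batch size are assumptions.
dataloader_fn = get_dataloader_fn(batch_size=8,
                                  precision="fp16",
                                  dataset_path="./LJSpeech_1.1",
                                  filelist="filelists/ljs_audio_pitch_text_test.txt")

for ids, x, y_real in dataloader_fn():
    text = x["INPUT__0"]        # padded token ids, shape (batch, max_text_len)
    mel = y_real["OUTPUT__0"]   # reference mels, shape (batch, n_mel_channels, max_frames)
    print(ids, text.shape, mel.shape)
```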
TensorFlow2/LanguageModeling/BERT
BERT
squad_lib
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library to process data for SQuAD 1.1 and SQuAD 2.0.""" # pylint: disable=g-bad-import-order from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import json import math import six from absl import logging import tensorflow as tf import tokenization # pylint: enable=g-bad-import-order class SquadExample(object): """A single training/test example for simple sequence classification. For examples without an answer, the start and end position are -1. """ def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=False): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (tokenization.printable_text(self.qas_id)) s += ", question_text: %s" % ( tokenization.printable_text(self.question_text)) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.start_position: s += ", end_position: %d" % (self.end_position) if self.start_position: s += ", is_impossible: %r" % (self.is_impossible) return s class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, input_mask, segment_ids, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible class FeatureWriter(object): """Writes InputFeature to TF example file.""" def __init__(self, filename, is_training): self.filename = filename self.is_training = is_training self.num_features = 0 self._writer = tf.io.TFRecordWriter(filename) def process_feature(self, feature): """Write a InputFeature to the TFRecordWriter as a tf.train.Example.""" self.num_features += 1 def create_int_feature(values): feature = tf.train.Feature( int64_list=tf.train.Int64List(value=list(values))) return feature features = collections.OrderedDict() features["unique_ids"] = create_int_feature([feature.unique_id]) features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = 
create_int_feature(feature.segment_ids) if self.is_training: features["start_positions"] = create_int_feature([feature.start_position]) features["end_positions"] = create_int_feature([feature.end_position]) impossible = 0 if feature.is_impossible: impossible = 1 features["is_impossible"] = create_int_feature([impossible]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) self._writer.write(tf_example.SerializeToString()) def close(self): self._writer.close() def read_squad_examples(input_file, is_training, version_2_with_negative, input_data=None): """Read a SQuAD json file into a list of SquadExample.""" if input_data is None: with tf.io.gfile.GFile(input_file, "r") as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = False if is_training: if version_2_with_negative: is_impossible = qa["is_impossible"] if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join( doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( tokenization.whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: logging.warning("Could not find answer: '%s' vs. 
'%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible) examples.append(example) return examples def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, output_fn, batch_size=None): """Loads a data file into a list of `InputBatch`s.""" base_id = 1000000000 unique_id = base_id feature = None for (example_index, example) in enumerate(examples): query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) # The -3 accounts for [CLS], [SEP] and [SEP] max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. _DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] token_to_orig_map = {} token_is_max_context = {} segment_ids = [] tokens.append("[CLS]") segment_ids.append(0) for token in query_tokens: tokens.append(token) segment_ids.append(0) tokens.append("[SEP]") segment_ids.append(0) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) segment_ids.append(1) tokens.append("[SEP]") segment_ids.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length start_position = None end_position = None if is_training and not example.is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 else: doc_offset = len(query_tokens) + 2 start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and example.is_impossible: start_position = 0 end_position = 0 if example_index < 2: logging.info("*** Example ***") logging.info("unique_id: %s", (unique_id)) logging.info("example_index: %s", (example_index)) logging.info("doc_span_index: %s", (doc_span_index)) logging.info("tokens: %s", " ".join([tokenization.printable_text(x) for x in tokens])) logging.info( "token_to_orig_map: %s", " ".join([ "%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map) ])) logging.info( "token_is_max_context: %s", " ".join([ "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) ])) logging.info("input_ids: %s", " ".join([str(x) for x in input_ids])) logging.info("input_mask: %s", " ".join([str(x) for x in input_mask])) logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids])) if is_training and example.is_impossible: logging.info("impossible example") if is_training and not example.is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) logging.info("start_position: %d", (start_position)) logging.info("end_position: %d", (end_position)) logging.info("answer: %s", tokenization.printable_text(answer_text)) feature = InputFeatures( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible) # Run callback if is_training: output_fn(feature) else: output_fn(feature, is_padding=False) unique_id += 1 if not is_training and feature: assert batch_size num_padding = 0 num_examples = unique_id - base_id if unique_id % batch_size != 0: num_padding = batch_size - (num_examples % batch_size) logging.info("Adding padding examples to make sure no partial batch.") logging.info("Adds %d padding examples for inference.", num_padding) dummy_feature = copy.deepcopy(feature) for _ in range(num_padding): dummy_feature.unique_id = unique_id # Run callback output_fn(feature, is_padding=True) unique_id += 1 return unique_id - base_id def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". 
However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"]) def get_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, version_2_with_negative=False, null_score_diff_threshold=0.0, verbose=False): example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min mull score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, 
feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens): continue if end_index >= len(feature.tokens): continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index])) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit)) prelim_predictions = sorted( prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"]) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] tok_text = " ".join(tok_tokens) # De-tokenize WordPieces that have been split off. tok_text = tok_text.replace(" ##", "") tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text( tok_text, orig_text, do_lower_case, verbose=verbose) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append( _NbestPrediction( text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't inlude the empty option in the n-best, inlcude it if version_2_with_negative: if "" not in seen_predictions: nbest.append( _NbestPrediction( text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append( _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1 total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1 if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # pytype: disable=attribute-error # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - ( best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff if score_diff > null_score_diff_threshold: all_predictions[example.qas_id] = "" else: all_predictions[example.qas_id] = best_non_null_entry.text # pytype: enable=attribute-error all_nbest_json[example.qas_id] = nbest_json return all_predictions, all_nbest_json, scores_diff_json def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, version_2_with_negative=False, null_score_diff_threshold=0.0, verbose=False): """Write final predictions to the json file and log-odds of null if needed.""" logging.info("Writing predictions to: %s", (output_prediction_file)) logging.info("Writing nbest to: %s", (output_nbest_file)) all_predictions, all_nbest_json, scores_diff_json = get_predictions( all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, version_2_with_negative, null_score_diff_threshold, verbose) with tf.io.gfile.GFile(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") with tf.io.gfile.GFile(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: with tf.io.gfile.GFile(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") def get_final_text(pred_text, orig_text, do_lower_case, verbose=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heruistic between # `pred_text` and `orig_text` to get a character-to-charcter alignment. This # can fail in certain cases in which case we just return `orig_text`. 
def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose: logging.info("Unable to find text: '%s' in '%s'", pred_text, orig_text) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose: logging.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. tok_s_to_ns_map = {} for (i, tok_index) in six.iteritems(tok_ns_to_s_map): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose: logging.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose: logging.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position:(orig_end_position + 1)] return output_text def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): # pylint: disable=consider-using-enumerate if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs def generate_tf_record_from_json_file(input_file_path, vocab_file_path, output_path, max_seq_length=384, do_lower_case=True, max_query_length=64, doc_stride=128, version_2_with_negative=False): """Generates and saves training data into a tf record file.""" train_examples = read_squad_examples( input_file=input_file_path, is_training=True, version_2_with_negative=version_2_with_negative) tokenizer = tokenization.FullTokenizer( vocab_file=vocab_file_path, do_lower_case=do_lower_case) train_writer = FeatureWriter(filename=output_path, is_training=True) number_of_examples = convert_examples_to_features( examples=train_examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=doc_stride, 
max_query_length=max_query_length, is_training=True, output_fn=train_writer.process_feature) train_writer.close() meta_data = { "task_type": "bert_squad", "train_data_size": number_of_examples, "max_seq_length": max_seq_length, "max_query_length": max_query_length, "doc_stride": doc_stride, "version_2_with_negative": version_2_with_negative, } return meta_data
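As a rough end-to-end sketch of how these helpers are typically used: first convert the raw SQuAD JSON into a TFRecord for training, then, after inference, turn accumulated `RawResult`s into prediction files. The paths below are placeholders, and `eval_examples`, `eval_features` and `all_results` are assumed to come from `read_squad_examples`, `convert_examples_to_features` and the model's outputs respectively.

```python
# Placeholder paths; eval_examples / eval_features / all_results are assumed
# to be produced elsewhere (read_squad_examples, convert_examples_to_features,
# and the model's start/end logits wrapped in RawResult tuples).
meta_data = generate_tf_record_from_json_file(
    input_file_path="squad/train-v1.1.json",
    vocab_file_path="vocab.txt",
    output_path="squad/train.tf_record",
    max_seq_length=384,
    do_lower_case=True,
    max_query_length=64,
    doc_stride=128,
    version_2_with_negative=False)
print("training features:", meta_data["train_data_size"])

write_predictions(
    eval_examples, eval_features, all_results,
    n_best_size=20, max_answer_length=30, do_lower_case=True,
    output_prediction_file="predictions.json",
    output_nbest_file="nbest_predictions.json",
    output_null_log_odds_file="null_odds.json",
    version_2_with_negative=False)
```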
PaddlePaddle/LanguageModeling/BERT/utils
utils
save_load
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import os import re import io import shutil import tempfile import logging import json import paddle import numpy as np from utils.task import Task _PROGRESS_SUFFIX = '_progress.json' _PDOPT_SUFFIX = '.pdopt' _PDPARAMS_SUFFIX = '.pdparams' def mkdir_if_not_exist(path): """ Mkdir if not exists, ignore the exception when multiprocess mkdir together. """ if not os.path.exists(path): try: os.makedirs(path) except OSError as e: if e.errno == errno.EEXIST and os.path.isdir(path): logging.warning( f"be happy if some process has already created {path}") else: raise OSError(f"Failed to mkdir {path}") def load_train_progress(progress_file): """ Load train progress info (such as file_list, epoch_id, step_id) from a given file, which is used to resume training. Args: progress_file(str): Path to a file named `progress.json` with progress info. Returns: pregress_dict(dict): A dict with progress info. """ progress_dict = {} if os.path.isfile(progress_file): with open(progress_file, "r", encoding='utf-8') as reader: json_obj = json.loads(reader.read()) for k, v in json_obj.items(): progress_dict[k] = v else: logging.warning("progress file is not found") return progress_dict def _load_state(path): """ Load model parameters from .pdparams file. Args: path(str): Path to .pdparams file. Returns: state(dict): Dict of parameters loaded from file. """ if os.path.exists(path + _PDOPT_SUFFIX): tmp = tempfile.mkdtemp() dst = os.path.join(tmp, os.path.basename(os.path.normpath(path))) shutil.copy(path + _PDPARAMS_SUFFIX, dst + _PDPARAMS_SUFFIX) state = paddle.static.load_program_state(dst) shutil.rmtree(tmp) else: state = paddle.static.load_program_state(path) return state def load_params(prog, path, ignore_params=None): """ Load model from the given path. Args: prog (paddle.static.Program): Load weight to which Program object. path (string): Model path. ignore_params (list): Ignore variable to load when finetuning. """ if not (os.path.isdir(path) or os.path.exists(path + _PDPARAMS_SUFFIX)): raise ValueError(f"Model pretrain path {path} does not exists.") logging.info(f"Loading parameters from {path}...") ignore_set = set() state = _load_state(path) # ignore the parameter which mismatch the shape # between the model and pretrain weight. 
all_var_shape = {} for block in prog.blocks: for param in block.all_parameters(): all_var_shape[param.name] = param.shape ignore_set.update([ name for name, shape in all_var_shape.items() if name in state and shape != state[name].shape ]) if ignore_params: all_var_names = [var.name for var in prog.list_vars()] ignore_list = filter( lambda var: any([re.match(name, var) for name in ignore_params]), all_var_names) ignore_set.update(list(ignore_list)) if len(ignore_set) > 0: for k in ignore_set: if k in state: logging.warning( f"variable {k} is already excluded automatically") del state[k] for n, p in state.items(): state[n] = p.astype(np.float32) paddle.static.set_program_state(prog, state) def init_ckpt(path_to_ckpt, program, exe): """ Init from checkpoints or pretrained model in given path. Args: path_to_ckpt(str): The path to files of checkpoints, including '.pdparams' and '.pdopt'. program(paddle.static.Program): The program to init model. exe(paddle.static.Executor): The executor to run program. """ if path_to_ckpt: paddle.static.load(program, path_to_ckpt, exe) logging.info(f"Finish initing checkpoint from {path_to_ckpt}") return def init_pretrained(path_to_pretrained, program): """ Init from checkpoints or pretrained model in given path. Args: path_to_pretrained(str): The path to file of pretrained model. program(paddle.static.Program): The program to init model. """ if path_to_pretrained: if not isinstance(path_to_pretrained, list): pretrained_model = [path_to_pretrained] for pretrain in pretrained_model: load_params(program, pretrain) logging.info( f"Finish initing pretrained model from {pretrained_model}") def reset_program_state_dict(model, pretrained_file=None): """ Initialize the parameter from the bert config, and set the parameter by reseting the state dict." """ state_dict = model.state_dict() pretrained_state_dict = None if pretrained_file is not None: pretrained_state_dict = _load_state(pretrained_file) reset_state_dict = {} scale = model.bert.bert_config.initializer_range reset_parameter_names = [] for n, p in state_dict.items(): if pretrained_state_dict is not None and n in pretrained_state_dict: reset_state_dict[p.name] = np.array( pretrained_state_dict[n], dtype=np.float32) reset_parameter_names.append(n) elif pretrained_state_dict is not None and p.name in pretrained_state_dict and "bert" in n: reset_state_dict[p.name] = np.array( pretrained_state_dict[p.name], dtype=np.float32) reset_parameter_names.append(n) elif "layer_norm" not in p.name and "b_0" not in p.name: reset_state_dict[p.name] = np.random.normal( loc=0.0, scale=scale, size=p.shape).astype("float32") logging.info( f"the following parameter had reset, please check. {reset_parameter_names}" ) return reset_state_dict def init_program(args, program, exe, model, task=Task.pretrain): """ Init from given checkpoint or pretrained parameters. Args: args(Namespace): Arguments obtained from ArgumentParser. program(paddle.static.Program): The program to init model. exe(paddle.static.Executor): The executor to run program. model(paddle.nn.Layer): An instance of BERT model defined in modeling.py. 
""" progress = None if args.from_checkpoint is not None: init_ckpt(args.from_checkpoint, program, exe) progress = load_train_progress(args.from_checkpoint + _PROGRESS_SUFFIX) #elif task == Task.pretrain and args.from_pretrained_params is not None: elif args.from_pretrained_params is not None: init_pretrained(args.from_pretrained_params, program) else: reset_state_dict = reset_program_state_dict( model, args.from_pretrained_params) paddle.static.set_program_state(program, reset_state_dict) return progress def save_model(program, model_path, prefix, progress=None): """ Save a model to given path. Args: program(paddle.static.Program): The program to be saved. model_path(str): The path to save model. prefix(str): The prefix of model files. """ if paddle.distributed.get_rank() != 0: return mkdir_if_not_exist(model_path) model_prefix = os.path.join(model_path, prefix) paddle.static.save(program, model_prefix) if progress is not None: progress_file = os.path.join(model_path, prefix + _PROGRESS_SUFFIX) out_json = json.dumps(progress, indent=2, sort_keys=True) + "\n" with io.open(progress_file, 'w', encoding="utf-8") as f: f.write(out_json) logging.info(f"Already save model in {model_path}")
TensorFlow2/Recommendation/DLRM_and_DCNv2/tests
tests
test_with_opts
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/bin/bash set -e set -x NAMES=${1:-'*.yaml'} TARGET=feature_specs/${NAMES} OPTIONS=${2-""} for file in ${TARGET}; do echo "${file}"; done for fspec_file in ${TARGET}; do SYNTH_DATA_DIR=/tmp/generated_data/${fspec_file} # generate data based on fspec python /dlrm/prepare_synthetic_dataset.py --feature_spec ${fspec_file} --synthetic_dataset_dir ${SYNTH_DATA_DIR} # single-GPU A100-80GB #horovodrun -np 1 -H localhost:1 --mpi-args=--oversubscribe numactl --interleave=all -- python -u /dlrm/main.py --dataset_path ${SYNTH_DATA_DIR} ${OPTIONS} # single-GPU V100-32GB #horovodrun -np 1 -H localhost:1 --mpi-args=--oversubscribe numactl --interleave=all -- python -u /dlrm/main.py --dataset_path ${SYNTH_DATA_DIR} ${OPTIONS} # delete the data rm -r ${SYNTH_DATA_DIR} done # # usage: # docker build . -t nvidia_dlrm_tf # docker run --security-opt seccomp=unconfined --runtime=nvidia -it --rm --ipc=host -v ${PWD}/data:/data nvidia_dlrm_tf bash # cd tests # bash test_with_opts.sh
TensorFlow2/Segmentation
Segmentation
README
# Segmentation

Image segmentation is the field of image processing that deals with separating an image into multiple subgroups or regions (sets of pixels, also known as image segments) representing distinctive objects or their subparts. Nowadays, we constantly interpret the world around us through cameras and other devices, so image segmentation has become an integral part of our lives: it is an indispensable technique for teaching devices how to process this interpretation and understand the world around them.

In this collection, we will cover:
- What is image segmentation?
- Types of image segmentation
- How does image segmentation work?
- Use-cases and applications
- Where to get started

---

## What is image segmentation?

Image segmentation is a computer vision process by which a digital image is divided into various categories or segments. We use this method to understand what is depicted in an image through a pixel-wise classification. It is quite distinct from image classification, which assigns a label to an entire image, and from object detection, which identifies and locates objects within an image by drawing bounding boxes around them. Image segmentation provides more fine-grained, pixel-level knowledge of the image content.

Consider a roadside scene with pedestrians, cars and lights:

![](img/3_image-segmentation-figure-1.png)

This photo is made up of an immense number of individual pixels, and image segmentation aims to assign each of those pixels to the object to which it belongs. Segmenting an image enables us to separate the foreground from the background, identify the precise location of a road or a car, and mark the boundaries that separate a pedestrian from a car or the road.

---

## Types of image segmentation

Image segmentation tasks can be broken down into two broad categories: semantic segmentation and instance segmentation.

1. Semantic segmentation: the process of assigning each pixel to a particular class label. It does not distinguish between different instances of the same object. For example, if there are two cats in an image, semantic segmentation gives the same label to the pixels of both cats.
2. Instance segmentation: this differs from semantic segmentation in that it gives a unique label to every instance of a particular object in the image. For example, if an image contains three dogs, each dog is assigned a different colour, i.e. a different label, whereas with semantic segmentation all of them would have been assigned the same colour.

---

## How does image segmentation work?

Let's consider image segmentation as a function. An image is given as input, and the function returns a matrix or mask in which each element tells us which class or instance the corresponding pixel belongs to. Machine learning approaches to image segmentation train models to recognize which features of an image are crucial, rather than designing bespoke heuristics by hand. Although deep neural network architectures for image segmentation may differ in implementation, most follow a similar basic structure:

![](img/3_image-segmentation-figure-2.png)

Source - [SegNet Paper](https://arxiv.org/pdf/1511.00561.pdf)

- The encoder: A set of layers that extract features of an image through a sequence of progressively narrower and deeper filters. Oftentimes, the encoder is pre-trained on a different task (like image recognition), where it learns statistical correlations from many images and may transfer that knowledge for the purposes of segmentation.
- The decoder: A set of layers that progressively grows the output of the encoder into a segmentation mask with the same pixel resolution as the input image.
- Skip connections: Long-range connections in the neural network that allow the model to draw on features at varying spatial scales and improve its accuracy.

Most of the architectures used for segmentation tasks are built on the idea of a Fully Convolutional Network (FCN), i.e. the architecture contains convolution layers instead of any dense or max-pooling layers. Although many models follow the FCN approach, a few models generally used in production are UNet, Mask R-CNN, and DeepLabv3.

---

## Use-cases and applications

Image segmentation is useful for many different use-cases: handwriting recognition, virtual try-on, visual image search, road scene segmentation, organ segmentation and much more. Here are a few applications explained in detail:

### Autonomous vehicles:

There are a lot of things that need your attention while driving: the road, other vehicles, pedestrians, sidewalks, and (potentially) a plethora of other obstacles and safety hazards. If you have been driving for a long time, noticing and reacting to this environment may seem automatic, like second nature. A self-driving car, however, must see, interpret, and respond to a scene in real time. This implies the need to build a pixel-level map of the world from the vehicle's camera system in order to navigate safely and efficiently. Even though the field of autonomous vehicles is much more complex than performing segmentation alone, this pixel-level understanding is an essential ingredient in making it a reality.

![](img/3_image-segmentation-figure-3.png)

### Medical imaging and diagnostics:

Image segmentation is a powerful technique in the initial steps of the diagnostic and treatment pipeline for many conditions that require medical images, such as CT or MRI scans. Essentially, segmentation can effectively distinguish and separate homogeneous areas that may include particularly important pixels of organs, lesions, and so on. However, there are significant challenges, including low contrast, noise, and various other imaging ambiguities.

![](img/3_image-segmentation-figure-4.png)

### Virtual try-on:

Virtually trying on clothes is a fascinating feature that used to be available only in stores equipped with specialized hardware for building a 3D model of the customer. Interestingly, with deep learning and image segmentation the same result can be obtained from just a 2D image.

![](img/3_image-segmentation-figure-5.png)

---

## Where to get started

NVIDIA provides Deep Learning Examples for Image Segmentation on its GitHub repository. These examples provide easy-to-consume, highly optimized scripts for both training and inference. The quick start guide at our GitHub repository will help you set up the environment using NGC Docker images, download pre-trained models from NGC, and adapt the model training and inference to your application or use-case. Here are the examples relevant for image segmentation, directly from [Deep Learning Examples](https://github.com/NVIDIA/DeepLearningExamples):

1. 3D UNet for Medical Image Segmentation using TensorFlow 1.x
   - [Git repository](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_3D_Medical)
   - Uses the TensorFlow 20.06-tf1-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-tensorflow)
2. 2D UNet for Industrial Defect Segmentation using TensorFlow 1.x
   - [Git repository](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/Segmentation/UNet_Industrial)
   - Uses the TensorFlow 20.06-tf1-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-tensorflow)
3. MaskRCNN for Common Objects Segmentation using PyTorch
   - [Git repository](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Segmentation/MaskRCNN)
   - Uses the PyTorch 20.06-py3 [NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch)
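To make the encoder-decoder structure described above concrete, below is a deliberately tiny, illustrative Keras sketch (not one of the optimized models listed above): an encoder that downsamples, a decoder that upsamples back to the input resolution with a single skip connection, and a per-pixel softmax over classes.

```python
# Minimal illustrative encoder-decoder for semantic segmentation (not an
# optimized production model; see the repositories listed above for those).
import tensorflow as tf
from tensorflow.keras import layers

def tiny_segmentation_model(num_classes=3, input_shape=(128, 128, 3)):
    inputs = tf.keras.Input(shape=input_shape)

    # Encoder: progressively narrower spatial maps, deeper filters.
    x = layers.Conv2D(32, 3, strides=2, padding="same", activation="relu")(inputs)
    skip = x
    x = layers.Conv2D(64, 3, strides=2, padding="same", activation="relu")(x)

    # Decoder: upsample back towards the input resolution.
    x = layers.Conv2DTranspose(32, 3, strides=2, padding="same", activation="relu")(x)
    x = layers.Concatenate()([x, skip])  # skip connection
    x = layers.Conv2DTranspose(32, 3, strides=2, padding="same", activation="relu")(x)

    # Per-pixel class scores -> an (H, W, num_classes) mask.
    outputs = layers.Conv2D(num_classes, 1, activation="softmax")(x)
    return tf.keras.Model(inputs, outputs)

model = tiny_segmentation_model()
model.summary()
```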
TensorFlow/Detection/SSD/models/research/object_detection
object_detection
exporter_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.export_inference_graph.""" import os import numpy as np import six import tensorflow as tf from google.protobuf import text_format from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from object_detection import exporter from object_detection.builders import graph_rewriter_builder from object_detection.builders import model_builder from object_detection.core import model from object_detection.protos import graph_rewriter_pb2 from object_detection.protos import pipeline_pb2 from object_detection.utils import ops if six.PY2: import mock # pylint: disable=g-import-not-at-top else: from unittest import mock # pylint: disable=g-import-not-at-top slim = tf.contrib.slim class FakeModel(model.DetectionModel): def __init__(self, add_detection_keypoints=False, add_detection_masks=False): self._add_detection_keypoints = add_detection_keypoints self._add_detection_masks = add_detection_masks def preprocess(self, inputs): true_image_shapes = [] # Doesn't matter for the fake model. return tf.identity(inputs), true_image_shapes def predict(self, preprocessed_inputs, true_image_shapes): return {'image': tf.layers.conv2d(preprocessed_inputs, 3, 1)} def postprocess(self, prediction_dict, true_image_shapes): with tf.control_dependencies(prediction_dict.values()): postprocessed_tensors = { 'detection_boxes': tf.constant([[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]], tf.float32), 'detection_scores': tf.constant([[0.7, 0.6], [0.9, 0.0]], tf.float32), 'detection_classes': tf.constant([[0, 1], [1, 0]], tf.float32), 'num_detections': tf.constant([2, 1], tf.float32) } if self._add_detection_keypoints: postprocessed_tensors['detection_keypoints'] = tf.constant( np.arange(48).reshape([2, 2, 6, 2]), tf.float32) if self._add_detection_masks: postprocessed_tensors['detection_masks'] = tf.constant( np.arange(64).reshape([2, 2, 4, 4]), tf.float32) return postprocessed_tensors def restore_map(self, checkpoint_path, fine_tune_checkpoint_type): pass def loss(self, prediction_dict, true_image_shapes): pass def regularization_losses(self): pass def updates(self): pass class ExportInferenceGraphTest(tf.test.TestCase): def _save_checkpoint_from_mock_model(self, checkpoint_path, use_moving_averages, enable_quantization=False): g = tf.Graph() with g.as_default(): mock_model = FakeModel() preprocessed_inputs, true_image_shapes = mock_model.preprocess( tf.placeholder(tf.float32, shape=[None, None, None, 3])) predictions = mock_model.predict(preprocessed_inputs, true_image_shapes) mock_model.postprocess(predictions, true_image_shapes) if use_moving_averages: tf.train.ExponentialMovingAverage(0.0).apply() tf.train.get_or_create_global_step() if enable_quantization: graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() 
graph_rewriter_config.quantization.delay = 500000 graph_rewriter_fn = graph_rewriter_builder.build( graph_rewriter_config, is_training=False) graph_rewriter_fn() saver = tf.train.Saver() init = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init) saver.save(sess, checkpoint_path) def _load_inference_graph(self, inference_graph_path, is_binary=True): od_graph = tf.Graph() with od_graph.as_default(): od_graph_def = tf.GraphDef() with tf.gfile.GFile(inference_graph_path) as fid: if is_binary: od_graph_def.ParseFromString(fid.read()) else: text_format.Parse(fid.read(), od_graph_def) tf.import_graph_def(od_graph_def, name='') return od_graph def _create_tf_example(self, image_array): with self.test_session(): encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval() def _bytes_feature(value): return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) example = tf.train.Example(features=tf.train.Features(feature={ 'image/encoded': _bytes_feature(encoded_image), 'image/format': _bytes_feature('jpg'), 'image/source_id': _bytes_feature('image_id') })).SerializeToString() return example def test_export_graph_with_image_tensor_input(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'saved_model.pb'))) def test_write_inference_graph(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory, write_inference_graph=True) self.assertTrue(os.path.exists(os.path.join( output_directory, 'inference_graph.pbtxt'))) def test_export_graph_with_fixed_size_image_tensor_input(self): input_shape = [1, 320, 320, 3] tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model( trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory, input_shape=input_shape) saved_model_path = 
os.path.join(output_directory, 'saved_model') self.assertTrue( os.path.exists(os.path.join(saved_model_path, 'saved_model.pb'))) with tf.Graph().as_default() as od_graph: with self.test_session(graph=od_graph) as sess: meta_graph = tf.saved_model.loader.load( sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) signature = meta_graph.signature_def['serving_default'] input_tensor_name = signature.inputs['inputs'].name image_tensor = od_graph.get_tensor_by_name(input_tensor_name) self.assertSequenceEqual(image_tensor.get_shape().as_list(), input_shape) def test_export_graph_with_tf_example_input(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='tf_example', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'saved_model.pb'))) def test_export_graph_with_encoded_image_string_input(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() output_directory = os.path.join(tmp_dir, 'output') pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='encoded_image_string_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'saved_model.pb'))) def _get_variables_in_checkpoint(self, checkpoint_file): return set([ var_name for var_name, _ in tf.train.list_variables(checkpoint_file)]) def test_replace_variable_values_with_moving_averages(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') new_checkpoint_prefix = os.path.join(tmp_dir, 'new.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) graph = tf.Graph() with graph.as_default(): fake_model = FakeModel() preprocessed_inputs, true_image_shapes = fake_model.preprocess( tf.placeholder(dtype=tf.float32, shape=[None, None, None, 3])) predictions = fake_model.predict(preprocessed_inputs, true_image_shapes) fake_model.postprocess(predictions, true_image_shapes) exporter.replace_variable_values_with_moving_averages( graph, trained_checkpoint_prefix, new_checkpoint_prefix) expected_variables = set(['conv2d/bias', 'conv2d/kernel']) variables_in_old_ckpt = self._get_variables_in_checkpoint( trained_checkpoint_prefix) self.assertIn('conv2d/bias/ExponentialMovingAverage', variables_in_old_ckpt) self.assertIn('conv2d/kernel/ExponentialMovingAverage', variables_in_old_ckpt) variables_in_new_ckpt = self._get_variables_in_checkpoint( new_checkpoint_prefix) self.assertTrue(expected_variables.issubset(variables_in_new_ckpt)) 
self.assertNotIn('conv2d/bias/ExponentialMovingAverage', variables_in_new_ckpt) self.assertNotIn('conv2d/kernel/ExponentialMovingAverage', variables_in_new_ckpt) def test_export_graph_with_moving_averages(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = True exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) self.assertTrue(os.path.exists(os.path.join( output_directory, 'saved_model', 'saved_model.pb'))) expected_variables = set(['conv2d/bias', 'conv2d/kernel', 'global_step']) actual_variables = set( [var_name for var_name, _ in tf.train.list_variables(output_directory)]) self.assertTrue(expected_variables.issubset(actual_variables)) def test_export_model_with_quantization_nodes(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model( trained_checkpoint_prefix, use_moving_averages=False, enable_quantization=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'inference_graph.pbtxt') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() text_format.Merge( """graph_rewriter { quantization { delay: 50000 activation_bits: 8 weight_bits: 8 } }""", pipeline_config) exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory, write_inference_graph=True) self._load_inference_graph(inference_graph_path, is_binary=False) has_quant_nodes = False for v in tf.global_variables(): if v.op.name.endswith('act_quant/min'): has_quant_nodes = True break self.assertTrue(has_quant_nodes) def test_export_model_with_all_output_nodes(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) with self.test_session(graph=inference_graph): inference_graph.get_tensor_by_name('image_tensor:0') inference_graph.get_tensor_by_name('detection_boxes:0') inference_graph.get_tensor_by_name('detection_scores:0') inference_graph.get_tensor_by_name('detection_classes:0') inference_graph.get_tensor_by_name('detection_keypoints:0') 
inference_graph.get_tensor_by_name('detection_masks:0') inference_graph.get_tensor_by_name('num_detections:0') def test_export_model_with_detection_only_nodes(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel(add_detection_masks=False) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) with self.test_session(graph=inference_graph): inference_graph.get_tensor_by_name('image_tensor:0') inference_graph.get_tensor_by_name('detection_boxes:0') inference_graph.get_tensor_by_name('detection_scores:0') inference_graph.get_tensor_by_name('detection_classes:0') inference_graph.get_tensor_by_name('num_detections:0') with self.assertRaises(KeyError): inference_graph.get_tensor_by_name('detection_keypoints:0') inference_graph.get_tensor_by_name('detection_masks:0') def test_export_and_run_inference_with_image_tensor(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) with self.test_session(graph=inference_graph) as sess: image_tensor = inference_graph.get_tensor_by_name('image_tensor:0') boxes = inference_graph.get_tensor_by_name('detection_boxes:0') scores = inference_graph.get_tensor_by_name('detection_scores:0') classes = inference_graph.get_tensor_by_name('detection_classes:0') keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') masks = inference_graph.get_tensor_by_name('detection_masks:0') num_detections = inference_graph.get_tensor_by_name('num_detections:0') (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={image_tensor: np.ones((2, 4, 4, 3)).astype(np.uint8)}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def _create_encoded_image_string(self, image_array_np, 
encoding_format): od_graph = tf.Graph() with od_graph.as_default(): if encoding_format == 'jpg': encoded_string = tf.image.encode_jpeg(image_array_np) elif encoding_format == 'png': encoded_string = tf.image.encode_png(image_array_np) else: raise ValueError('Supports only the following formats: `jpg`, `png`') with self.test_session(graph=od_graph): return encoded_string.eval() def test_export_and_run_inference_with_encoded_image_string_tensor(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='encoded_image_string_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) jpg_image_str = self._create_encoded_image_string( np.ones((4, 4, 3)).astype(np.uint8), 'jpg') png_image_str = self._create_encoded_image_string( np.ones((4, 4, 3)).astype(np.uint8), 'png') with self.test_session(graph=inference_graph) as sess: image_str_tensor = inference_graph.get_tensor_by_name( 'encoded_image_string_tensor:0') boxes = inference_graph.get_tensor_by_name('detection_boxes:0') scores = inference_graph.get_tensor_by_name('detection_scores:0') classes = inference_graph.get_tensor_by_name('detection_classes:0') keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') masks = inference_graph.get_tensor_by_name('detection_masks:0') num_detections = inference_graph.get_tensor_by_name('num_detections:0') for image_str in [jpg_image_str, png_image_str]: image_str_batch_np = np.hstack([image_str]* 2) (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={image_str_tensor: image_str_batch_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_raise_runtime_error_on_images_with_different_sizes(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='encoded_image_string_tensor', 
pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) large_image = self._create_encoded_image_string( np.ones((4, 4, 3)).astype(np.uint8), 'jpg') small_image = self._create_encoded_image_string( np.ones((2, 2, 3)).astype(np.uint8), 'jpg') image_str_batch_np = np.hstack([large_image, small_image]) with self.test_session(graph=inference_graph) as sess: image_str_tensor = inference_graph.get_tensor_by_name( 'encoded_image_string_tensor:0') boxes = inference_graph.get_tensor_by_name('detection_boxes:0') scores = inference_graph.get_tensor_by_name('detection_scores:0') classes = inference_graph.get_tensor_by_name('detection_classes:0') keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') masks = inference_graph.get_tensor_by_name('detection_masks:0') num_detections = inference_graph.get_tensor_by_name('num_detections:0') with self.assertRaisesRegexp(tf.errors.InvalidArgumentError, 'TensorArray.*shape'): sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={image_str_tensor: image_str_batch_np}) def test_export_and_run_inference_with_tf_example(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='tf_example', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) inference_graph = self._load_inference_graph(inference_graph_path) tf_example_np = np.expand_dims(self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8)), axis=0) with self.test_session(graph=inference_graph) as sess: tf_example = inference_graph.get_tensor_by_name('tf_example:0') boxes = inference_graph.get_tensor_by_name('detection_boxes:0') scores = inference_graph.get_tensor_by_name('detection_scores:0') classes = inference_graph.get_tensor_by_name('detection_classes:0') keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') masks = inference_graph.get_tensor_by_name('detection_masks:0') num_detections = inference_graph.get_tensor_by_name('num_detections:0') (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_write_frozen_graph(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) 
output_directory = os.path.join(tmp_dir, 'output') inference_graph_path = os.path.join(output_directory, 'frozen_inference_graph.pb') tf.gfile.MakeDirs(output_directory) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False detection_model = model_builder.build(pipeline_config.model, is_training=False) outputs, _ = exporter._build_detection_graph( input_type='tf_example', detection_model=detection_model, input_shape=None, output_collection_name='inference_op', graph_hook_fn=None) output_node_names = ','.join(outputs.keys()) saver = tf.train.Saver() input_saver_def = saver.as_saver_def() exporter.freeze_graph_with_def_protos( input_graph_def=tf.get_default_graph().as_graph_def(), input_saver_def=input_saver_def, input_checkpoint=trained_checkpoint_prefix, output_node_names=output_node_names, restore_op_name='save/restore_all', filename_tensor_name='save/Const:0', output_graph=inference_graph_path, clear_devices=True, initializer_nodes='') inference_graph = self._load_inference_graph(inference_graph_path) tf_example_np = np.expand_dims(self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8)), axis=0) with self.test_session(graph=inference_graph) as sess: tf_example = inference_graph.get_tensor_by_name('tf_example:0') boxes = inference_graph.get_tensor_by_name('detection_boxes:0') scores = inference_graph.get_tensor_by_name('detection_scores:0') classes = inference_graph.get_tensor_by_name('detection_classes:0') keypoints = inference_graph.get_tensor_by_name('detection_keypoints:0') masks = inference_graph.get_tensor_by_name('detection_masks:0') num_detections = inference_graph.get_tensor_by_name('num_detections:0') (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_export_graph_saves_pipeline_file(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=True) output_directory = os.path.join(tmp_dir, 'output') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel() pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() exporter.export_inference_graph( input_type='image_tensor', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) expected_pipeline_path = os.path.join( output_directory, 'pipeline.config') self.assertTrue(os.path.exists(expected_pipeline_path)) written_pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() with tf.gfile.GFile(expected_pipeline_path, 'r') as f: proto_str = f.read() text_format.Merge(proto_str, written_pipeline_config) self.assertProtoEquals(pipeline_config, written_pipeline_config) def 
test_export_saved_model_and_run_inference(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) output_directory = os.path.join(tmp_dir, 'output') saved_model_path = os.path.join(output_directory, 'saved_model') with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='tf_example', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) tf_example_np = np.hstack([self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8))] * 2) with tf.Graph().as_default() as od_graph: with self.test_session(graph=od_graph) as sess: meta_graph = tf.saved_model.loader.load( sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) signature = meta_graph.signature_def['serving_default'] input_tensor_name = signature.inputs['inputs'].name tf_example = od_graph.get_tensor_by_name(input_tensor_name) boxes = od_graph.get_tensor_by_name( signature.outputs['detection_boxes'].name) scores = od_graph.get_tensor_by_name( signature.outputs['detection_scores'].name) classes = od_graph.get_tensor_by_name( signature.outputs['detection_classes'].name) keypoints = od_graph.get_tensor_by_name( signature.outputs['detection_keypoints'].name) masks = od_graph.get_tensor_by_name( signature.outputs['detection_masks'].name) num_detections = od_graph.get_tensor_by_name( signature.outputs['num_detections'].name) (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_write_saved_model(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) output_directory = os.path.join(tmp_dir, 'output') saved_model_path = os.path.join(output_directory, 'saved_model') tf.gfile.MakeDirs(output_directory) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False detection_model = model_builder.build(pipeline_config.model, is_training=False) outputs, placeholder_tensor = exporter._build_detection_graph( input_type='tf_example', detection_model=detection_model, input_shape=None, output_collection_name='inference_op', graph_hook_fn=None) output_node_names = ','.join(outputs.keys()) saver = tf.train.Saver() input_saver_def = saver.as_saver_def() frozen_graph_def = exporter.freeze_graph_with_def_protos( input_graph_def=tf.get_default_graph().as_graph_def(), 
input_saver_def=input_saver_def, input_checkpoint=trained_checkpoint_prefix, output_node_names=output_node_names, restore_op_name='save/restore_all', filename_tensor_name='save/Const:0', output_graph='', clear_devices=True, initializer_nodes='') exporter.write_saved_model( saved_model_path=saved_model_path, frozen_graph_def=frozen_graph_def, inputs=placeholder_tensor, outputs=outputs) tf_example_np = np.hstack([self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8))] * 2) with tf.Graph().as_default() as od_graph: with self.test_session(graph=od_graph) as sess: meta_graph = tf.saved_model.loader.load( sess, [tf.saved_model.tag_constants.SERVING], saved_model_path) signature = meta_graph.signature_def['serving_default'] input_tensor_name = signature.inputs['inputs'].name tf_example = od_graph.get_tensor_by_name(input_tensor_name) boxes = od_graph.get_tensor_by_name( signature.outputs['detection_boxes'].name) scores = od_graph.get_tensor_by_name( signature.outputs['detection_scores'].name) classes = od_graph.get_tensor_by_name( signature.outputs['detection_classes'].name) keypoints = od_graph.get_tensor_by_name( signature.outputs['detection_keypoints'].name) masks = od_graph.get_tensor_by_name( signature.outputs['detection_masks'].name) num_detections = od_graph.get_tensor_by_name( signature.outputs['num_detections'].name) (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_export_checkpoint_and_run_inference(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) output_directory = os.path.join(tmp_dir, 'output') model_path = os.path.join(output_directory, 'model.ckpt') meta_graph_path = model_path + '.meta' with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False exporter.export_inference_graph( input_type='tf_example', pipeline_config=pipeline_config, trained_checkpoint_prefix=trained_checkpoint_prefix, output_directory=output_directory) tf_example_np = np.hstack([self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8))] * 2) with tf.Graph().as_default() as od_graph: with self.test_session(graph=od_graph) as sess: new_saver = tf.train.import_meta_graph(meta_graph_path) new_saver.restore(sess, model_path) tf_example = od_graph.get_tensor_by_name('tf_example:0') boxes = od_graph.get_tensor_by_name('detection_boxes:0') scores = od_graph.get_tensor_by_name('detection_scores:0') classes = od_graph.get_tensor_by_name('detection_classes:0') keypoints = od_graph.get_tensor_by_name('detection_keypoints:0') masks = od_graph.get_tensor_by_name('detection_masks:0') num_detections = od_graph.get_tensor_by_name('num_detections:0') (boxes_np, scores_np, classes_np, keypoints_np, 
masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_write_graph_and_checkpoint(self): tmp_dir = self.get_temp_dir() trained_checkpoint_prefix = os.path.join(tmp_dir, 'model.ckpt') self._save_checkpoint_from_mock_model(trained_checkpoint_prefix, use_moving_averages=False) output_directory = os.path.join(tmp_dir, 'output') model_path = os.path.join(output_directory, 'model.ckpt') meta_graph_path = model_path + '.meta' tf.gfile.MakeDirs(output_directory) with mock.patch.object( model_builder, 'build', autospec=True) as mock_builder: mock_builder.return_value = FakeModel( add_detection_keypoints=True, add_detection_masks=True) pipeline_config = pipeline_pb2.TrainEvalPipelineConfig() pipeline_config.eval_config.use_moving_averages = False detection_model = model_builder.build(pipeline_config.model, is_training=False) exporter._build_detection_graph( input_type='tf_example', detection_model=detection_model, input_shape=None, output_collection_name='inference_op', graph_hook_fn=None) saver = tf.train.Saver() input_saver_def = saver.as_saver_def() exporter.write_graph_and_checkpoint( inference_graph_def=tf.get_default_graph().as_graph_def(), model_path=model_path, input_saver_def=input_saver_def, trained_checkpoint_prefix=trained_checkpoint_prefix) tf_example_np = np.hstack([self._create_tf_example( np.ones((4, 4, 3)).astype(np.uint8))] * 2) with tf.Graph().as_default() as od_graph: with self.test_session(graph=od_graph) as sess: new_saver = tf.train.import_meta_graph(meta_graph_path) new_saver.restore(sess, model_path) tf_example = od_graph.get_tensor_by_name('tf_example:0') boxes = od_graph.get_tensor_by_name('detection_boxes:0') scores = od_graph.get_tensor_by_name('detection_scores:0') classes = od_graph.get_tensor_by_name('detection_classes:0') keypoints = od_graph.get_tensor_by_name('detection_keypoints:0') masks = od_graph.get_tensor_by_name('detection_masks:0') num_detections = od_graph.get_tensor_by_name('num_detections:0') (boxes_np, scores_np, classes_np, keypoints_np, masks_np, num_detections_np) = sess.run( [boxes, scores, classes, keypoints, masks, num_detections], feed_dict={tf_example: tf_example_np}) self.assertAllClose(boxes_np, [[[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 0.8, 0.8]], [[0.5, 0.5, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]]]) self.assertAllClose(scores_np, [[0.7, 0.6], [0.9, 0.0]]) self.assertAllClose(classes_np, [[1, 2], [2, 1]]) self.assertAllClose(keypoints_np, np.arange(48).reshape([2, 2, 6, 2])) self.assertAllClose(masks_np, np.arange(64).reshape([2, 2, 4, 4])) self.assertAllClose(num_detections_np, [2, 1]) def test_rewrite_nn_resize_op(self): g = tf.Graph() with g.as_default(): x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8)) s = ops.nearest_neighbor_upsampling(x, 2) t = s + y exporter.rewrite_nn_resize_op() resize_op_found = False for op in g.get_operations(): if op.type == 'ResizeNearestNeighbor': resize_op_found = True self.assertEqual(op.inputs[0], x) 
self.assertEqual(op.outputs[0].consumers()[0], t.op) break self.assertTrue(resize_op_found) def test_rewrite_nn_resize_op_quantized(self): g = tf.Graph() with g.as_default(): x = array_ops.placeholder(dtypes.float32, shape=(8, 10, 10, 8)) x_conv = tf.contrib.slim.conv2d(x, 8, 1) y = array_ops.placeholder(dtypes.float32, shape=(8, 20, 20, 8)) s = ops.nearest_neighbor_upsampling(x_conv, 2) t = s + y graph_rewriter_config = graph_rewriter_pb2.GraphRewriter() graph_rewriter_config.quantization.delay = 500000 graph_rewriter_fn = graph_rewriter_builder.build( graph_rewriter_config, is_training=False) graph_rewriter_fn() exporter.rewrite_nn_resize_op(is_quantized=True) resize_op_found = False for op in g.get_operations(): if op.type == 'ResizeNearestNeighbor': resize_op_found = True self.assertEqual(op.inputs[0].op.type, 'FakeQuantWithMinMaxVars') self.assertEqual(op.outputs[0].consumers()[0], t.op) break self.assertTrue(resize_op_found) if __name__ == '__main__': tf.test.main()
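The test cases above repeatedly use the same TF1 load-and-run pattern for exported frozen graphs. As a reference outside the test harness, here is a minimal sketch of that pattern; the graph path and the dummy input batch are hypothetical placeholders, and the tensor names are the ones exercised in the tests above.

```python
import numpy as np
import tensorflow as tf

FROZEN_GRAPH_PATH = '/tmp/output/frozen_inference_graph.pb'  # hypothetical path

# Load the frozen graph, mirroring _load_inference_graph in the tests above.
graph = tf.Graph()
with graph.as_default():
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(FROZEN_GRAPH_PATH, 'rb') as fid:
        graph_def.ParseFromString(fid.read())
    tf.import_graph_def(graph_def, name='')

with tf.Session(graph=graph) as sess:
    image_tensor = graph.get_tensor_by_name('image_tensor:0')
    fetches = [graph.get_tensor_by_name(name + ':0')
               for name in ('detection_boxes', 'detection_scores',
                            'detection_classes', 'num_detections')]
    # A dummy uint8 batch; real inputs are [batch, height, width, 3] images.
    boxes, scores, classes, num = sess.run(
        fetches, feed_dict={image_tensor: np.zeros((1, 4, 4, 3), np.uint8)})
```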
TensorFlow/Detection/SSD/models/research/object_detection/predictors
predictors
mask_rcnn_box_predictor
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Mask R-CNN Box Predictor.""" import tensorflow as tf from object_detection.core import box_predictor slim = tf.contrib.slim BOX_ENCODINGS = box_predictor.BOX_ENCODINGS CLASS_PREDICTIONS_WITH_BACKGROUND = ( box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND) MASK_PREDICTIONS = box_predictor.MASK_PREDICTIONS class MaskRCNNBoxPredictor(box_predictor.BoxPredictor): """Mask R-CNN Box Predictor. See Mask R-CNN: He, K., Gkioxari, G., Dollar, P., & Girshick, R. (2017). Mask R-CNN. arXiv preprint arXiv:1703.06870. This is used for the second stage of the Mask R-CNN detector where proposals cropped from an image are arranged along the batch dimension of the input image_features tensor. Notice that locations are *not* shared across classes, thus for each anchor, a separate prediction is made for each class. In addition to predicting boxes and classes, optionally this class allows predicting masks and/or keypoints inside detection boxes. Currently this box predictor makes per-class predictions; that is, each anchor makes a separate box prediction for each class. """ def __init__(self, is_training, num_classes, box_prediction_head, class_prediction_head, third_stage_heads): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: number of classes. Note that num_classes *does not* include the background category, so if groundtruth labels take values in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the assigned classification targets can range from {0,... K}). box_prediction_head: The head that predicts the boxes in second stage. class_prediction_head: The head that predicts the classes in second stage. third_stage_heads: A dictionary mapping head names to mask rcnn head classes. """ super(MaskRCNNBoxPredictor, self).__init__(is_training, num_classes) self._box_prediction_head = box_prediction_head self._class_prediction_head = class_prediction_head self._third_stage_heads = third_stage_heads @property def num_classes(self): return self._num_classes def get_second_stage_prediction_heads(self): return BOX_ENCODINGS, CLASS_PREDICTIONS_WITH_BACKGROUND def get_third_stage_prediction_heads(self): return sorted(self._third_stage_heads.keys()) def _predict(self, image_features, num_predictions_per_location, prediction_stage=2): """Optionally computes encoded object locations, confidences, and masks. Predicts the heads belonging to the given prediction stage. Args: image_features: A list of float tensors of shape [batch_size, height_i, width_i, channels_i] containing roi pooled features for each image. The length of the list should be 1 otherwise a ValueError will be raised. num_predictions_per_location: A list of integers representing the number of box predictions to be made per spatial location for each feature map. Currently, this must be set to [1], or an error will be raised. 
prediction_stage: Prediction stage. Acceptable values are 2 and 3. Returns: A dictionary containing the predicted tensors that are listed in self._prediction_heads. A subset of the following keys will exist in the dictionary: BOX_ENCODINGS: A float tensor of shape [batch_size, 1, num_classes, code_size] representing the location of the objects. CLASS_PREDICTIONS_WITH_BACKGROUND: A float tensor of shape [batch_size, 1, num_classes + 1] representing the class predictions for the proposals. MASK_PREDICTIONS: A float tensor of shape [batch_size, 1, num_classes, image_height, image_width] Raises: ValueError: If num_predictions_per_location is not 1 or if len(image_features) is not 1. ValueError: if prediction_stage is not 2 or 3. """ if (len(num_predictions_per_location) != 1 or num_predictions_per_location[0] != 1): raise ValueError('Currently FullyConnectedBoxPredictor only supports ' 'predicting a single box per class per location.') if len(image_features) != 1: raise ValueError('length of `image_features` must be 1. Found {}'.format( len(image_features))) image_feature = image_features[0] predictions_dict = {} if prediction_stage == 2: predictions_dict[BOX_ENCODINGS] = self._box_prediction_head.predict( features=image_feature, num_predictions_per_location=num_predictions_per_location[0]) predictions_dict[CLASS_PREDICTIONS_WITH_BACKGROUND] = ( self._class_prediction_head.predict( features=image_feature, num_predictions_per_location=num_predictions_per_location[0])) elif prediction_stage == 3: for prediction_head in self.get_third_stage_prediction_heads(): head_object = self._third_stage_heads[prediction_head] predictions_dict[prediction_head] = head_object.predict( features=image_feature, num_predictions_per_location=num_predictions_per_location[0]) else: raise ValueError('prediction_stage should be either 2 or 3.') return predictions_dict
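A hedged sketch of how this predictor is driven for the second stage, assuming the `object_detection` package is importable. The zero-returning heads below are hypothetical stand-ins (a real configuration uses the head classes produced by the box-predictor builder), the class count and code size of 4 are assumptions, and the call goes through `_predict` directly to mirror the description above.

```python
import tensorflow as tf
from object_detection.predictors import mask_rcnn_box_predictor

NUM_CLASSES = 3  # hypothetical; excludes background, as documented above


class _ZeroBoxHead(object):
  """Hypothetical stand-in head: zero box encodings with code size 4."""
  def predict(self, features, num_predictions_per_location):
    num_rois = features.shape[0].value
    return tf.zeros([num_rois, 1, NUM_CLASSES, 4])


class _ZeroClassHead(object):
  """Hypothetical stand-in head: zero class logits including background."""
  def predict(self, features, num_predictions_per_location):
    num_rois = features.shape[0].value
    return tf.zeros([num_rois, 1, NUM_CLASSES + 1])


# Eight ROI-pooled 7x7x256 feature maps arranged along the batch dimension.
roi_features = tf.zeros([8, 7, 7, 256])

predictor = mask_rcnn_box_predictor.MaskRCNNBoxPredictor(
    is_training=False,
    num_classes=NUM_CLASSES,
    box_prediction_head=_ZeroBoxHead(),
    class_prediction_head=_ZeroClassHead(),
    third_stage_heads={})

preds = predictor._predict([roi_features],
                           num_predictions_per_location=[1],
                           prediction_stage=2)
# preds[mask_rcnn_box_predictor.BOX_ENCODINGS]                     -> [8, 1, 3, 4]
# preds[mask_rcnn_box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND] -> [8, 1, 4]
```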
TensorFlow/LanguageModeling/BERT/notebooks
notebooks
biobert_ner_tf_inference
#!/usr/bin/env python # coding: utf-8 # In[ ]: # Copyright 2021 NVIDIA Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # <img src="http://developer.download.nvidia.com/compute/machine-learning/frameworks/nvidia_logo.png" style="width: 90px; float: right;"> # # # BioBERT Named-Entity Recognition Inference with Mixed Precision # # ## 1. Overview # # Bidirectional Embedding Representations from Transformers (BERT), is a method of pre-training language representations which obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. # # BioBERT is a domain specific version of BERT that has been trained on PubMed abstracts. # # The original BioBERT paper can be found here: https://arxiv.org/abs/1901.08746 # # NVIDIA's BioBERT is an optimized version of the implementation presented in the paper, leveraging mixed precision arithmetic and tensor cores on V100 GPUS for faster training times while maintaining target accuracy. # ### 1.a Learning objectives # # This notebook demonstrates: # - Inference on NER task with BioBERT model # - The use/download of fine-tuned NVIDIA BioBERT models # - Use of Mixed Precision for Inference # ## 2. Requirements # # Please refer to the ReadMe file # ## 3. BioBERT Inference: Named-Entity Recognition # # We can run inference on a fine-tuned BioBERT model for tasks like Named-Entity Recognition. # # Here we use a BioBERT model fine-tuned on a [BC5CDR-disease Dataset](https://www.ncbi.nlm.nih.gov/research/bionlp/Data/) which consists of 1500 PubMed articles with 5818 annotated diseases. # ### 3.a Extract Disease Information from Text # # In this example we will use Named-Entity Recognition model created using BioBERT to extract disease information from the following paragraph: # # **Input Text** # # _"The authors describe the case of a 56 - year - old woman with chronic, severe heart failure # secondary to dilated cardiomyopathy and absence of significant ventricular arrhythmias # who developed QT prolongation and torsade de pointes ventricular tachycardia during one cycle # of intermittent low dose (2.5 mcg/kg per min) dobutamine. # This report of torsade de pointes ventricular tachycardia during intermittent dobutamine # supports the hypothesis that unpredictable fatal arrhythmias may occur even with low doses # and in patients with no history of significant rhythm disturbances. 
# The mechanisms of proarrhythmic effects of Dubutamine are discussed."_ # # **Output visualized using displaCy** # # <div class="entities" style="line-height: 2.5; direction: ltr">The authors describe the case of a 56 year old woman with chronic , severe # <mark class="entity" style="background: #ddd; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; box-decoration-break: clone; -webkit-box-decoration-break: clone"> # heart failure # <span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem">DISEASE</span> # </mark> # secondary to # <mark class="entity" style="background: #ddd; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; box-decoration-break: clone; -webkit-box-decoration-break: clone"> # dilated cardiomyopathy # <span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem">DISEASE</span> # </mark> # and absence of significant # <mark class="entity" style="background: #ddd; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; box-decoration-break: clone; -webkit-box-decoration-break: clone"> # ventricular arrhythmias # <span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem">DISEASE</span> # </mark> # who developed QT # <mark class="entity" style="background: #ddd; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; box-decoration-break: clone; -webkit-box-decoration-break: clone"> # prolongation # <span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem">DISEASE</span> # </mark> # and torsade de pointes # <mark class="entity" style="background: #ddd; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; box-decoration-break: clone; -webkit-box-decoration-break: clone"> # ventricular tachycardia # <span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem">DISEASE</span> # </mark> # during one cycle of intermittent low dose ( 2.5 mcg / kg per min ) dobutamine . 
This report of torsade de pointes # <mark class="entity" style="background: #ddd; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; box-decoration-break: clone; -webkit-box-decoration-break: clone"> # ventricular tachycardia # <span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem">DISEASE</span> # </mark> # during intermittent dobutamine supports the hypothesis that unpredictable fatal # <mark class="entity" style="background: #ddd; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; box-decoration-break: clone; -webkit-box-decoration-break: clone"> # arrhythmias # <span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem">DISEASE</span> # </mark> # may occur even with low doses and in patients with no history of significant # <mark class="entity" style="background: #ddd; padding: 0.45em 0.6em; margin: 0 0.25em; line-height: 1; border-radius: 0.35em; box-decoration-break: clone; -webkit-box-decoration-break: clone"> # rhythm disturbances # <span style="font-size: 0.8em; font-weight: bold; line-height: 1; border-radius: 0.35em; text-transform: uppercase; vertical-align: middle; margin-left: 0.5rem">DISEASE</span> # </mark> # . The mechanisms of proarrhythmic effects of Dubutamine are discussed . </div> # In[ ]: text= """ The authors describe the case of a 56 year old woman with chronic, severe heart failure secondary to dilated cardiomyopathy and absence of significant ventricular arrhythmias who developed QT prolongation and torsade de pointes ventricular tachycardia during one cycle of intermittent low dose (2.5 mcg/kg per min) dobutamine. This report of torsade de pointes ventricular tachycardia during intermittent dobutamine supports the hypothesis that unpredictable fatal arrhythmias may occur even with low doses and in patients with no history of significant rhythm disturbances. The mechanisms of proarrhythmic effects of Dubutamine are discussed. """ # In[ ]: import os import sys notebooks_dir = '../notebooks' working_dir = '../' if working_dir not in sys.path: sys.path.append(working_dir) # In[ ]: # Convert the text into the IOB tags format seen during training, using dummy placeholder labels import spacy nlp = spacy.load("en_core_web_sm") text = text.strip() doc = nlp(text) input_file = os.path.join(notebooks_dir, 'input.tsv') with open(os.path.join(input_file), 'w') as wf: for word in doc: if word.text is '\n': continue wf.write(word.text + '\tO\n') wf.write('\n') # Indicate end of text # ### 3.b Mixed Precision # # Mixed precision training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of tensor cores in the Volta and Turing architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. # # For information about: # - How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation. 
# - How to access and enable AMP for TensorFlow, see [Using TF-AMP](https://docs.nvidia.com/deeplearning/dgx/tensorflow-user-guide/index.html#tfamp) from the TensorFlow User Guide. # - Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. # In this notebook we control mixed precision execution with the environment variable: # In[ ]: import os os.environ["TF_ENABLE_AUTO_MIXED_PRECISION"] = "1" # ## 4. Fine-Tuned NVIDIA BioBERT TF Models # # We have the following Named Entity Recognition models fine-tuned from BioBERT available on NGC (NVIDIA GPU Cloud, https://ngc.nvidia.com). # # | **Model** | **Description** | # |:---------:|:----------:| # |BioBERT NER BC5CDR Disease | NER model to extract disease information from text, trained on the BC5CDR-Disease dataset | # |BioBERT NER BC5CDR Chemical | NER model to extract chemical information from text, trained on the BC5CDR-Chemical dataset. | # # # For this example, we will download the Disease NER model trained on the BC5CDR-disease Dataset. # # In[ ]: # biobert_tf_uncased_base_ner_disease DATA_DIR_BIOBERT = '../data/download/finetuned_biobert_model' get_ipython().system('mkdir -p $DATA_DIR_BIOBERT') get_ipython().system('wget --content-disposition -O $DATA_DIR_BIOBERT/biobert_tf_uncased_base_ner_disease_19.08.1.zip https://api.ngc.nvidia.com/v2/models/nvidia/biobert_tf_uncased_base_ner_disease/versions/19.08.1/zip && unzip -n -d $DATA_DIR_BIOBERT/ $DATA_DIR_BIOBERT/biobert_tf_uncased_base_ner_disease_19.08.1.zip && rm $DATA_DIR_BIOBERT/biobert_tf_uncased_base_ner_disease_19.08.1.zip') # In the code that follows we will refer to these models. # ## 5. Running NER task inference # # In order to run NER inference we will follow step-by-step the flow implemented in run_ner.py. # ### 5.a Configure Things # In[ ]: import run_ner from run_ner import BC5CDRProcessor, model_fn_builder, file_based_input_fn_builder, filed_based_convert_examples_to_features, result_to_pair import os, sys import time import tensorflow as tf import modeling import tokenization tf.logging.set_verbosity(tf.logging.ERROR) # Create the output directory where all the results are saved. output_dir = os.path.join(working_dir, 'output') tf.gfile.MakeDirs(output_dir) # The config json file corresponding to the pre-trained BERT model. # This specifies the model architecture. bert_config_file = os.path.join(DATA_DIR_BIOBERT, 'bert_config.json') # The vocabulary file that the BERT model was trained on. vocab_file = os.path.join(DATA_DIR_BIOBERT, 'vocab.txt') init_checkpoint = os.path.join(DATA_DIR_BIOBERT, 'model.ckpt') # Whether to lower case the input text. # Should be True for uncased models and False for cased models. # The BioBERT available in NGC is uncased do_lower_case = True # Total batch size for predictions predict_batch_size = 1 params = dict([('batch_size', predict_batch_size)]) # The maximum total input sequence length after WordPiece tokenization. # Sequences longer than this will be truncated, and sequences shorter than this will be padded. max_seq_length = 128 # This is a workaround to use flags from here: flags = tf.flags if 'f' not in tf.flags.FLAGS: tf.app.flags.DEFINE_string('f', '', 'kernel') FLAGS = flags.FLAGS FLAGS.output_dir = output_dir # ### 5.b Define Tokenizer & Create Estimator # In[ ]: # Validate the casing config consistency with the checkpoint name.
tokenization.validate_case_matches_checkpoint(do_lower_case, init_checkpoint) # Create the tokenizer. tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case) # Load the configuration from file bert_config = modeling.BertConfig.from_json_file(bert_config_file) # Use the data processor for BC5CDR processor = BC5CDRProcessor() # Get labels in the index order that was used during training label_list = processor.get_labels() # Reverse index the labels. This will be used later when evaluating predictions. id2label = {} for (i, label) in enumerate(label_list, 1): id2label[i] = label config = tf.ConfigProto(log_device_placement=True) run_config = tf.estimator.RunConfig( model_dir=None, session_config=config, save_checkpoints_steps=1000, keep_checkpoint_max=1) # Use model function builder to create the model function model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list) + 1, init_checkpoint=init_checkpoint) # amp=use_amp) estimator = tf.estimator.Estimator( model_fn=model_fn, config=run_config, params=params) # ### 5.c Run Inference # In[ ]: # Load the input data using the BC5CDR processor predict_examples = processor.get_test_examples(notebooks_dir, file_name='input.tsv') # Convert to tf_records and save it predict_file = os.path.join(output_dir, "predict.tf_record") filed_based_convert_examples_to_features(predict_examples, label_list, max_seq_length, tokenizer, predict_file) tf.logging.info("***** Running predictions *****") tf.logging.info(" Num orig examples = %d", len(predict_examples)) tf.logging.info(" Batch size = %d", predict_batch_size) # Run prediction on this tf_record file predict_input_fn = file_based_input_fn_builder( input_file=predict_file, batch_size=predict_batch_size, seq_length=max_seq_length, is_training=False, drop_remainder=False) pred_start_time = time.time() predictions = estimator.predict(input_fn=predict_input_fn) predictions = list(predictions) pred_time_elapsed = time.time() - pred_start_time tf.logging.info("-----------------------------") tf.logging.info("Total Inference Time = %0.2f", pred_time_elapsed) # tf.logging.info("Inference Performance = %0.4f sentences/sec", avg_sentences_per_second) tf.logging.info("-----------------------------") # ### 5.d Save Predictions # In[ ]: # Let's now process the predictions and save them to file(s) tf.logging.info("Save Predictions:") # File containing the list of predictions as IOB tags output_predict_file = os.path.join(FLAGS.output_dir, "label_test.txt") # File containing the list of words, the dummy token and the predicted IOB tag test_labels_file = os.path.join(FLAGS.output_dir, "test_labels.txt") test_labels_err_file = os.path.join(FLAGS.output_dir, "test_labels_errs.txt") with tf.gfile.Open(output_predict_file, 'w') as writer, \ tf.gfile.Open(test_labels_file, 'w') as tl, \ tf.gfile.Open(test_labels_err_file, 'w') as tle: i=0 for prediction in estimator.predict(input_fn=predict_input_fn, yield_single_examples=True): output_line = "\n".join(id2label[id] for id in prediction if id != 0) + "\n" writer.write(output_line) result_to_pair(predict_examples[i], prediction, id2label, tl, tle) i = i + 1 # ### 5.e Visualize Predictions # In[ ]: # Let's create a function that can formats the predictions for display using displaCy def predictions_for_displacy(predict_examples, predictions, id2label): processed_text = '' entities = [] current_pos = 0 start_pos = 0 end_pos = 0 end_detected = False prev_label = '' for predict_line, pred_ids in zip(predict_examples, 
predictions): words = str(predict_line.text).split(' ') labels = str(predict_line.label).split(' ') # get from CLS to SEP pred_labels = [] for id in pred_ids: if id == 0: continue curr_label = id2label[id] if curr_label == '[CLS]': continue elif curr_label == '[SEP]': break elif curr_label == 'X': continue pred_labels.append(curr_label) for tok, label, pred_label in zip(words, labels, pred_labels): if pred_label is 'B': start_pos = current_pos elif pred_label is 'I' and prev_label is not 'B' and prev_label is not 'I': start_pos = current_pos elif pred_label is 'O' and (prev_label is 'B' or prev_label is 'I'): end_pos = current_pos end_detected = True if end_detected: entities.append({'start':start_pos, 'end': end_pos, 'label': 'DISEASE'}) start_pos = 0 end_pos = 0 end_detected = False processed_text = processed_text + tok + ' ' current_pos = current_pos + len(tok) + 1 prev_label = pred_label #Handle entity at the very end if start_pos > 0 and end_detected is False: entities.append({'start':start_pos, 'end': current_pos, 'label': 'DISEASE'}) displacy_input = [{"text": processed_text, "ents": entities, "title": None}] return displacy_input # In[ ]: # Convert the predictions to the Named Entities format required by displaCy and visualize displacy_input = predictions_for_displacy(predict_examples, predictions, id2label) html = spacy.displacy.render(displacy_input, style="ent", manual=True) # ## 6. What's next # Now that you are familiar with running NER Inference on BioBERT, using mixed precision, you may want to try extracting disease information from other biomedical text.
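For reference, the span-building logic in `predictions_for_displacy` above boils down to a standard IOB-to-character-offset conversion. A minimal self-contained sketch follows; the tokens and tags are toy values, not real model output, and offsets assume tokens joined by single spaces with displaCy-style exclusive end offsets.

```python
def iob_to_spans(tokens, tags, label='DISEASE'):
    """Convert per-token IOB tags into character-offset entity spans."""
    spans, start, pos = [], None, 0
    for tok, tag in zip(tokens, tags):
        if tag == 'B':
            if start is not None:               # close a directly preceding entity
                spans.append({'start': start, 'end': pos - 1, 'label': label})
            start = pos
        elif tag == 'I':
            if start is None:                   # tolerate an I without a preceding B
                start = pos
        else:                                   # 'O'
            if start is not None:
                spans.append({'start': start, 'end': pos - 1, 'label': label})
                start = None
        pos += len(tok) + 1                     # +1 for the joining space
    if start is not None:                       # entity running to the end of the text
        spans.append({'start': start, 'end': pos - 1, 'label': label})
    return spans


tokens = ['severe', 'heart', 'failure', 'was', 'reported']
tags   = ['O',      'B',     'I',       'O',   'O']
print(iob_to_spans(tokens, tags))
# [{'start': 7, 'end': 20, 'label': 'DISEASE'}]  ->  "heart failure"
```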
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/training/AMP
AMP
train_benchmark_8xV100-32G
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. horovodrun -np 8 bash ./scripts/bind.sh --cpu=exclusive --ib=single -- python3 main.py \ --cfg config/efficientnet_v2/s_cfg.py \ --mode train_and_eval \ --use_amp \ --use_xla \ --model_dir ./output/ \ --data_dir /data/ \ --log_steps 500 \ --save_checkpoint_freq 10 \ --n_stages 1 \ --max_epochs 3 \ --steps_per_epoch 2000 \ --train_batch_size 128 \ --train_img_size 300 \ --lr_decay cosine \ --lr_init 0.005 \ --weight_decay .000005 \ --opt_epsilon 0.001 \ --moving_average_decay 0.9999 \ --eval_img_size 384 \ --eval_batch_size 128 \ --augmenter_name randaugment \ --raug_num_layers 2 \ --raug_magnitude 15 \ --cutmix_alpha 0 \ --mixup_alpha 0 \ --defer_img_mixing
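As a quick sanity check on the flags above, the effective workload of this benchmark command can be derived with a few lines of arithmetic. This assumes, as is usual for Horovod launches, that `--train_batch_size` is the per-GPU batch size; treat that as an assumption rather than a statement about the script.

```python
# Derived quantities for the benchmark command above (values copied from its flags).
n_gpus = 8                 # horovodrun -np 8
per_gpu_batch = 128        # --train_batch_size (assumed per GPU)
steps_per_epoch = 2000     # --steps_per_epoch
max_epochs = 3             # --max_epochs

global_batch = n_gpus * per_gpu_batch        # 1024 images per optimizer step
total_steps = max_epochs * steps_per_epoch   # 6000 optimizer steps in the run
images_seen = total_steps * global_batch     # 6,144,000 images processed
print(global_batch, total_steps, images_seen)
```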
TensorFlow2/Segmentation/nnUNet/data_loading
data_loading
data_module
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import horovod.tensorflow as hvd from runtime.utils import get_config_file, is_main_process from sklearn.model_selection import KFold from data_loading.dali_loader import fetch_dali_loader from data_loading.utils import get_path, get_split, get_test_fnames, load_data class DataModule: def __init__(self, args): super().__init__() self.args = args self.train_imgs = [] self.train_lbls = [] self.val_imgs = [] self.val_lbls = [] self.test_imgs = [] self.kfold = KFold(n_splits=self.args.nfolds, shuffle=True, random_state=12345) self.data_path = get_path(args) configs = get_config_file(self.args) self.patch_size = configs["patch_size"] self.kwargs = { "dim": self.args.dim, "patch_size": self.patch_size, "seed": self.args.seed, "gpus": hvd.size(), "num_workers": self.args.num_workers, "oversampling": self.args.oversampling, "benchmark": self.args.benchmark, "nvol": self.args.nvol, "bench_steps": self.args.bench_steps, "meta": load_data(self.data_path, "*_meta.npy"), } def setup(self, stage=None): imgs = load_data(self.data_path, "*_x.npy") lbls = load_data(self.data_path, "*_y.npy") self.test_imgs, self.kwargs["meta"] = get_test_fnames(self.args, self.data_path, self.kwargs["meta"]) if self.args.exec_mode != "predict" or self.args.benchmark: train_idx, val_idx = list(self.kfold.split(imgs))[self.args.fold] self.train_imgs = get_split(imgs, train_idx) self.train_lbls = get_split(lbls, train_idx) self.val_imgs = get_split(imgs, val_idx) self.val_lbls = get_split(lbls, val_idx) if is_main_process(): ntrain, nval = len(self.train_imgs), len(self.val_imgs) print(f"Number of examples: Train {ntrain} - Val {nval}") # Shard the validation data self.val_imgs = self.val_imgs[hvd.rank() :: hvd.size()] self.val_lbls = self.val_lbls[hvd.rank() :: hvd.size()] self.cached_val_loader = None elif is_main_process(): print(f"Number of test examples: {len(self.test_imgs)}") def train_dataset(self): return fetch_dali_loader( self.train_imgs, self.train_lbls, self.args.batch_size, "train", **self.kwargs, ) def train_size(self): return len(self.train_imgs) def val_dataset(self): if self.cached_val_loader is None: self.cached_val_loader = fetch_dali_loader(self.val_imgs, self.val_lbls, 1, "eval", **self.kwargs) return self.cached_val_loader def val_size(self): return len(self.val_imgs) def test_dataset(self): if self.kwargs["benchmark"]: return fetch_dali_loader( self.train_imgs, self.train_lbls, self.args.batch_size, "test", **self.kwargs, ) return fetch_dali_loader(self.test_imgs, None, 1, "test", **self.kwargs) def test_size(self): return len(self.test_imgs) def test_fname(self, idx): return self.test_imgs[idx]
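A hypothetical usage sketch of the `DataModule` above. The exact argument set is defined by the repository's argument parser, so the `SimpleNamespace` fields below (notably `data`, `results`, `task`, and `gpus`) are assumptions for illustration only:

```python
# Illustrative only: wiring DataModule into a training entry point (assumed arguments).
from types import SimpleNamespace

import horovod.tensorflow as hvd
from data_loading.data_module import DataModule

hvd.init()
args = SimpleNamespace(
    dim=3, nfolds=5, fold=0, seed=1, num_workers=8, oversampling=0.4,
    benchmark=False, nvol=2, bench_steps=0, batch_size=2, exec_mode="train",
    data="/data", results="/results", task="01", gpus=1,  # assumed fields
)
dm = DataModule(args)
dm.setup()
train_loader = dm.train_dataset()  # DALI pipeline over this rank's training shard
print("train:", dm.train_size(), "val:", dm.val_size())
```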
PyTorch/Segmentation/nnUNet/triton
triton
config_model_on_triton
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" To configure a model on Triton, you can use the `config_model_on_triton.py` script. It prepares the Model Repository layout, including the Model Configuration. ```shell script python ./triton/config_model_on_triton.py \ --model-repository /model_repository \ --model-path /models/exported/model.onnx \ --model-format onnx \ --model-name ResNet50 \ --model-version 1 \ --max-batch-size 32 \ --precision fp16 \ --backend-accelerator trt \ --load-model explicit \ --timeout 120 \ --verbose ``` If the Triton server for which we prepare the model repository is running in **explicit model control mode**, use the `--load-model` argument to send a load_model request to the Triton Inference Server. If the server is listening on a non-default address or port, use the `--server-url` argument to point to the server control endpoint. If the HTTP protocol is required to communicate with the Triton server, use the `--http` argument. To improve inference throughput, you can enable [dynamic batching](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md#dynamic-batcher) for your model by providing the `--preferred-batch-sizes` and `--max-queue-delay-us` parameters. For models that do not support batching, set `--max-batch-size` to 0. By default, Triton will [automatically obtain input and output definitions](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md#auto-generated-model-configuration), but for TorchScript and TF GraphDef models the script uses a file with I/O specs. This file is automatically generated when the model is converted to ScriptModule (either traced or scripted). If you need to pass a non-default path to the I/O spec file, use the `--io-spec` CLI argument. 
I/O spec file is yaml file with below structure: ```yaml - inputs: - name: input dtype: float32 # np.dtype name shape: [None, 224, 224, 3] - outputs: - name: probabilities dtype: float32 shape: [None, 1001] - name: classes dtype: int32 shape: [None, 1] ``` """ import argparse import logging import time from model_navigator import Accelerator, Format, Precision from model_navigator.args import str2bool from model_navigator.log import set_logger, log_dict from model_navigator.triton import ModelConfig, TritonClient, TritonModelStore LOGGER = logging.getLogger("config_model") def _available_enum_values(my_enum): return [item.value for item in my_enum] def main(): parser = argparse.ArgumentParser( description="Create Triton model repository and model configuration", allow_abbrev=False ) parser.add_argument("--model-repository", required=True, help="Path to Triton model repository.") parser.add_argument("--model-path", required=True, help="Path to model to configure") # TODO: automation parser.add_argument( "--model-format", required=True, choices=_available_enum_values(Format), help="Format of model to deploy", ) parser.add_argument("--model-name", required=True, help="Model name") parser.add_argument("--model-version", default="1", help="Version of model (default 1)") parser.add_argument( "--max-batch-size", type=int, default=32, help="Maximum batch size allowed for inference. " "A max_batch_size value of 0 indicates that batching is not allowed for the model", ) # TODO: automation parser.add_argument( "--precision", type=str, default=Precision.FP16.value, choices=_available_enum_values(Precision), help="Model precision (parameter used only by Tensorflow backend with TensorRT optimization)", ) # Triton Inference Server endpoint parser.add_argument( "--server-url", type=str, default="grpc://localhost:8001", help="Inference server URL in format protocol://host[:port] (default grpc://localhost:8001)", ) parser.add_argument( "--load-model", choices=["none", "poll", "explicit"], help="Loading model while Triton Server is in given model control mode", ) parser.add_argument( "--timeout", default=120, help="Timeout in seconds to wait till model load (default=120)", type=int ) # optimization related parser.add_argument( "--backend-accelerator", type=str, choices=_available_enum_values(Accelerator), default=Accelerator.TRT.value, help="Select Backend Accelerator used to serve model", ) parser.add_argument("--number-of-model-instances", type=int, default=1, help="Number of model instances per GPU") parser.add_argument( "--preferred-batch-sizes", type=int, nargs="*", help="Batch sizes that the dynamic batcher should attempt to create. 
" "In case --max-queue-delay-us is set and this parameter is not, default value will be --max-batch-size", ) parser.add_argument( "--max-queue-delay-us", type=int, default=0, help="Max delay time which dynamic batcher shall wait to form a batch (default 0)", ) parser.add_argument( "--capture-cuda-graph", type=int, default=0, help="Use cuda capture graph (used only by TensorRT platform)", ) parser.add_argument("-v", "--verbose", help="Provide verbose logs", type=str2bool, default=False) args = parser.parse_args() set_logger(verbose=args.verbose) log_dict("args", vars(args)) config = ModelConfig.create( model_path=args.model_path, # model definition model_name=args.model_name, model_version=args.model_version, model_format=args.model_format, precision=args.precision, max_batch_size=args.max_batch_size, # optimization accelerator=args.backend_accelerator, gpu_engine_count=args.number_of_model_instances, preferred_batch_sizes=args.preferred_batch_sizes or [], max_queue_delay_us=args.max_queue_delay_us, capture_cuda_graph=args.capture_cuda_graph, ) model_store = TritonModelStore(args.model_repository) model_store.deploy_model(model_config=config, model_path=args.model_path) if args.load_model != "none": client = TritonClient(server_url=args.server_url, verbose=args.verbose) client.wait_for_server_ready(timeout=args.timeout) if args.load_model == "explicit": client.load_model(model_name=args.model_name) if args.load_model == "poll": time.sleep(15) client.wait_for_model(model_name=args.model_name, model_version=args.model_version, timeout_s=args.timeout) if __name__ == "__main__": main()
PyTorch/SpeechSynthesis/Tacotron2/platform
platform
DGX1_tacotron2_FP32_8NGPU_train
mkdir -p output python -m multiproc train.py -m Tacotron2 -o output/ -lr 1e-3 --epochs 1501 -bs 48 --weight-decay 1e-6 --grad-clip-thresh 1.0 --cudnn-enabled --load-mel-from-disk --training-files=filelists/ljs_mel_text_train_filelist.txt --validation-files=filelists/ljs_mel_text_val_filelist.txt --log-file nvlog.json --anneal-steps 500 1000 1500 --anneal-factor 0.1
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads
heads
keras_mask_head
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Keras Mask Heads. Contains Mask prediction head classes for different meta architectures. All the mask prediction heads have a predict function that receives the `features` as the first argument and returns `mask_predictions`. """ import tensorflow as tf from object_detection.predictors.heads import head class ConvolutionalMaskHead(head.KerasHead): """Convolutional class prediction head.""" def __init__(self, is_training, num_classes, use_dropout, dropout_keep_prob, kernel_size, num_predictions_per_location, conv_hyperparams, freeze_batchnorm, use_depthwise=False, mask_height=7, mask_width=7, masks_are_class_agnostic=False, name=None): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_classes: Number of classes. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). num_predictions_per_location: Number of box predictions to be made per spatial location. Int specifying number of boxes per location. conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object containing hyperparameters for convolution ops. freeze_batchnorm: Bool. Whether to freeze batch norm parameters during training or not. When training with a small batch size (e.g. 1), it is desirable to freeze batch norm update and use pretrained batch norm params. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. mask_height: Desired output mask height. The default value is 7. mask_width: Desired output mask width. The default value is 7. masks_are_class_agnostic: Boolean determining if the mask-head is class-agnostic or not. name: A string name scope to assign to the model. If `None`, Keras will auto-generate one from the class name. Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalMaskHead, self).__init__(name=name) self._is_training = is_training self._num_classes = num_classes self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._num_predictions_per_location = num_predictions_per_location self._use_depthwise = use_depthwise self._mask_height = mask_height self._mask_width = mask_width self._masks_are_class_agnostic = masks_are_class_agnostic self._mask_predictor_layers = [] # Add a slot for the background class. 
if self._masks_are_class_agnostic: self._num_masks = 1 else: self._num_masks = self._num_classes num_mask_channels = self._num_masks * self._mask_height * self._mask_width if self._use_dropout: self._mask_predictor_layers.append( # The Dropout layer's `training` parameter for the call method must # be set implicitly by the Keras set_learning_phase. The object # detection training code takes care of this. tf.keras.layers.Dropout(rate=1.0 - self._dropout_keep_prob)) if self._use_depthwise: self._mask_predictor_layers.append( tf.keras.layers.DepthwiseConv2D( [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, strides=1, dilation_rate=1, name='MaskPredictor_depthwise', **conv_hyperparams.params())) self._mask_predictor_layers.append( conv_hyperparams.build_batch_norm( training=(is_training and not freeze_batchnorm), name='MaskPredictor_depthwise_batchnorm')) self._mask_predictor_layers.append( conv_hyperparams.build_activation_layer( name='MaskPredictor_depthwise_activation')) self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * num_mask_channels, [1, 1], name='MaskPredictor', **conv_hyperparams.params(use_bias=True))) else: self._mask_predictor_layers.append( tf.keras.layers.Conv2D( num_predictions_per_location * num_mask_channels, [self._kernel_size, self._kernel_size], padding='SAME', name='MaskPredictor', **conv_hyperparams.params(use_bias=True))) def _predict(self, features): """Predicts masks. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. Returns: mask_predictions: A float tensor of shape [batch_size, num_anchors, num_masks, mask_height, mask_width] representing the mask predictions for the proposals. """ mask_predictions = features for layer in self._mask_predictor_layers: mask_predictions = layer(mask_predictions) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] mask_predictions = tf.reshape( mask_predictions, [batch_size, -1, self._num_masks, self._mask_height, self._mask_width]) return mask_predictions
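A small arithmetic sketch (not from the repository) of the channel bookkeeping performed above: the final convolution emits `num_predictions_per_location * num_masks * mask_height * mask_width` channels, which `_predict` then reshapes into per-anchor mask stacks:

```python
# Illustrative shape check for ConvolutionalMaskHead with example values.
num_classes = 90
masks_are_class_agnostic = False
mask_height = mask_width = 7
num_predictions_per_location = 6

num_masks = 1 if masks_are_class_agnostic else num_classes
num_mask_channels = num_masks * mask_height * mask_width                 # 90 * 7 * 7 = 4410
conv_output_channels = num_predictions_per_location * num_mask_channels  # 6 * 4410 = 26460
# _predict reshapes [batch, H, W, conv_output_channels] into
# [batch, num_anchors, num_masks, mask_height, mask_width].
print(conv_output_channels)
```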
TensorFlow/Detection/SSD/models/research/object_detection/core
core
keypoint_ops_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.core.keypoint_ops.""" import numpy as np import tensorflow as tf from object_detection.core import keypoint_ops class KeypointOpsTest(tf.test.TestCase): """Tests for common keypoint operations.""" def test_scale(self): keypoints = tf.constant([ [[0.0, 0.0], [100.0, 200.0]], [[50.0, 120.0], [100.0, 140.0]] ]) y_scale = tf.constant(1.0 / 100) x_scale = tf.constant(1.0 / 200) expected_keypoints = tf.constant([ [[0., 0.], [1.0, 1.0]], [[0.5, 0.6], [1.0, 0.7]] ]) output = keypoint_ops.scale(keypoints, y_scale, x_scale) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_clip_to_window(self): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) expected_keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.25], [0.75, 0.75]] ]) output = keypoint_ops.clip_to_window(keypoints, window) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_prune_outside_window(self): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) expected_keypoints = tf.constant([[[0.25, 0.5], [0.75, 0.75]], [[np.nan, np.nan], [np.nan, np.nan]]]) output = keypoint_ops.prune_outside_window(keypoints, window) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_change_coordinate_frame(self): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) window = tf.constant([0.25, 0.25, 0.75, 0.75]) expected_keypoints = tf.constant([ [[0, 0.5], [1.0, 1.0]], [[0.5, -0.5], [1.5, 1.5]] ]) output = keypoint_ops.change_coordinate_frame(keypoints, window) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_to_normalized_coordinates(self): keypoints = tf.constant([ [[10., 30.], [30., 45.]], [[20., 0.], [40., 60.]] ]) output = keypoint_ops.to_normalized_coordinates( keypoints, 40, 60) expected_keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_to_normalized_coordinates_already_normalized(self): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) output = keypoint_ops.to_normalized_coordinates( keypoints, 40, 60) with self.test_session() as sess: with self.assertRaisesOpError('assertion failed'): 
sess.run(output) def test_to_absolute_coordinates(self): keypoints = tf.constant([ [[0.25, 0.5], [0.75, 0.75]], [[0.5, 0.0], [1.0, 1.0]] ]) output = keypoint_ops.to_absolute_coordinates( keypoints, 40, 60) expected_keypoints = tf.constant([ [[10., 30.], [30., 45.]], [[20., 0.], [40., 60.]] ]) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_to_absolute_coordinates_already_absolute(self): keypoints = tf.constant([ [[10., 30.], [30., 45.]], [[20., 0.], [40., 60.]] ]) output = keypoint_ops.to_absolute_coordinates( keypoints, 40, 60) with self.test_session() as sess: with self.assertRaisesOpError('assertion failed'): sess.run(output) def test_flip_horizontal(self): keypoints = tf.constant([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]] ]) flip_permutation = [0, 2, 1] expected_keypoints = tf.constant([ [[0.1, 0.9], [0.3, 0.7], [0.2, 0.8]], [[0.4, 0.6], [0.6, 0.4], [0.5, 0.5]], ]) output = keypoint_ops.flip_horizontal(keypoints, 0.5, flip_permutation) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_flip_vertical(self): keypoints = tf.constant([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.4], [0.5, 0.5], [0.6, 0.6]] ]) flip_permutation = [0, 2, 1] expected_keypoints = tf.constant([ [[0.9, 0.1], [0.7, 0.3], [0.8, 0.2]], [[0.6, 0.4], [0.4, 0.6], [0.5, 0.5]], ]) output = keypoint_ops.flip_vertical(keypoints, 0.5, flip_permutation) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) def test_rot90(self): keypoints = tf.constant([ [[0.1, 0.1], [0.2, 0.2], [0.3, 0.3]], [[0.4, 0.6], [0.5, 0.6], [0.6, 0.7]] ]) expected_keypoints = tf.constant([ [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3]], [[0.4, 0.4], [0.4, 0.5], [0.3, 0.6]], ]) output = keypoint_ops.rot90(keypoints) with self.test_session() as sess: output_, expected_keypoints_ = sess.run([output, expected_keypoints]) self.assertAllClose(output_, expected_keypoints_) if __name__ == '__main__': tf.test.main()
PyTorch/Classification/GPUNet
GPUNet
validate
#!/usr/bin/env python3 # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2019-2022 Ross Wightman # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import csv import glob import json import logging import os import time from collections import OrderedDict from contextlib import suppress import itertools import dllogger import torch import torch.nn as nn import torch.nn.parallel from timm.data import ( RealLabelsImagenet, create_dataset, create_loader, resolve_data_config, ) from timm.models import ( apply_test_time_pool, create_model, is_model, list_models, load_checkpoint, ) # GPUNet Integration from timm.models.registry import register_model from timm.utils import ( # , set_jit_fuser AverageMeter, accuracy, natural_key, setup_default_logging, ) from configs.model_hub import get_configs, get_model_list from models.gpunet_builder import GPUNet_Builder @register_model def gpunet_0(pretrained=False, **kwargs): """Constructs GPUNet-0.""" modelJSON, checkpoint_path = get_configs(batch=1, latency="0.65ms", gpuType="GV100") builder = GPUNet_Builder() model = builder.get_model(modelJSON) model.default_cfg = { "architecture": "gpunet_0", "crop_pct": 1.0, "interpolation": "bicubic", "input_size": (3, model.imgRes, model.imgRes), "num_classes": 1000, "mean": (0.485, 0.456, 0.406), "std": (0.229, 0.224, 0.225), } for key in model.default_cfg: setattr(model, key, model.default_cfg[key]) if pretrained: load_checkpoint(model, checkpoint_path, use_ema=True) return model @register_model def gpunet_1(pretrained=False, **kwargs): """Constructs GPUNet-1.""" modelJSON, checkpoint_path = get_configs(batch=1, latency="0.85ms", gpuType="GV100") builder = GPUNet_Builder() model = builder.get_model(modelJSON) model.default_cfg = { "architecture": "gpunet_1", "crop_pct": 1.0, "interpolation": "bicubic", "input_size": (3, model.imgRes, model.imgRes), "num_classes": 1000, "mean": (0.485, 0.456, 0.406), "std": (0.229, 0.224, 0.225), } for key in model.default_cfg: setattr(model, key, model.default_cfg[key]) if pretrained: load_checkpoint(model, checkpoint_path, use_ema=True) return model @register_model def gpunet_2(pretrained=False, **kwargs): """Constructs GPUNet-2.""" modelJSON, checkpoint_path = get_configs(batch=1, latency="1.75ms", gpuType="GV100") builder = GPUNet_Builder() model = builder.get_model(modelJSON) model.default_cfg = { "architecture": "gpunet_2", "crop_pct": 1.0, "interpolation": "bicubic", "input_size": (3, model.imgRes, model.imgRes), 
"num_classes": 1000, "mean": (0.485, 0.456, 0.406), "std": (0.229, 0.224, 0.225), } for key in model.default_cfg: setattr(model, key, model.default_cfg[key]) if pretrained: load_checkpoint(model, checkpoint_path, use_ema=True) return model @register_model def gpunet_d1(pretrained=False, **kwargs): """Constructs GPUNet-D1.""" modelJSON, checkpoint_path = get_configs( batch=1, latency="1.25ms-D", gpuType="GV100" ) builder = GPUNet_Builder() model = builder.get_model(modelJSON) model.default_cfg = { "architecture": "gpunet_d1", "crop_pct": 1.0, "interpolation": "bicubic", "input_size": (3, model.imgRes, model.imgRes), "num_classes": 1000, "mean": (0.485, 0.456, 0.406), "std": (0.229, 0.224, 0.225), } for key in model.default_cfg: setattr(model, key, model.default_cfg[key]) if pretrained: load_checkpoint(model, checkpoint_path, use_ema=True) return model @register_model def gpunet_d2(pretrained=False, **kwargs): """Constructs GPUNet-D2.""" modelJSON, checkpoint_path = get_configs( batch=1, latency="2.25ms-D", gpuType="GV100" ) builder = GPUNet_Builder() model = builder.get_model(modelJSON) model.default_cfg = { "architecture": "gpunet_d2", "crop_pct": 1.0, "interpolation": "bicubic", "input_size": (3, model.imgRes, model.imgRes), "num_classes": 1000, "mean": (0.485, 0.456, 0.406), "std": (0.229, 0.224, 0.225), } for key in model.default_cfg: setattr(model, key, model.default_cfg[key]) if pretrained: load_checkpoint(model, checkpoint_path, use_ema=True) return model @register_model def gpunet_p0(pretrained=False, **kwargs): """Constructs GPUNet-P0.""" modelJSON, checkpoint_path = get_configs( batch=1, latency="0.5ms-D", gpuType="GV100" ) builder = GPUNet_Builder() model = builder.get_model(modelJSON) model.default_cfg = { "architecture": "gpunet_p0", "crop_pct": 0.875, "interpolation": "bicubic", "input_size": (3, model.imgRes, model.imgRes), "num_classes": 1000, "mean": (0.485, 0.456, 0.406), "std": (0.229, 0.224, 0.225), } for key in model.default_cfg: setattr(model, key, model.default_cfg[key]) if pretrained: model.load_state_dict(torch.load(checkpoint_path)) return model @register_model def gpunet_p1(pretrained=False, **kwargs): """Constructs GPUNet-P1.""" modelJSON, checkpoint_path = get_configs( batch=1, latency="0.8ms-D", gpuType="GV100" ) builder = GPUNet_Builder() model = builder.get_model(modelJSON) model.default_cfg = { "architecture": "gpunet_p1", "crop_pct": 0.875, "interpolation": "bicubic", "input_size": (3, model.imgRes, model.imgRes), "num_classes": 1000, "mean": (0.485, 0.456, 0.406), "std": (0.229, 0.224, 0.225), } for key in model.default_cfg: setattr(model, key, model.default_cfg[key]) if pretrained: model.load_state_dict(torch.load(checkpoint_path)) return model has_apex = False try: from apex import amp has_apex = True except ImportError: pass has_native_amp = False try: if getattr(torch.cuda.amp, "autocast") is not None: has_native_amp = True except AttributeError: pass torch.backends.cudnn.benchmark = True _logger = logging.getLogger("validate") parser = argparse.ArgumentParser(description="PyTorch ImageNet Validation") parser.add_argument("data", metavar="DIR", help="path to dataset") parser.add_argument( "--dataset", "-d", metavar="NAME", default="", help="dataset type (default: ImageFolder/ImageTar if empty)", ) parser.add_argument( "--split", metavar="NAME", default="validation", help="dataset split (default: validation)", ) # DLlogger parser.add_argument( "--dllogger-name", default="/logs/log.json", type=str, help="name of dllogger file" ) parser.add_argument( 
"--dataset-download", action="store_true", default=False, help="Allow download of dataset for torch/ and tfds/ datasets that support it.", ) parser.add_argument( "--model", "-m", metavar="NAME", default="dpn92", help="model architecture (default: dpn92)", ) parser.add_argument( "-j", "--workers", default=4, type=int, metavar="N", help="number of data loading workers (default: 2)", ) parser.add_argument( "-b", "--batch-size", default=256, type=int, metavar="N", help="mini-batch size (default: 256)", ) parser.add_argument( "--img-size", default=None, type=int, metavar="N", help="Input image dimension, uses model default if empty", ) parser.add_argument( "--input-size", default=None, nargs=3, type=int, metavar="N N N", help="Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty", ) parser.add_argument( "--crop-pct", default=None, type=float, metavar="N", help="Input image center crop pct", ) parser.add_argument( "--mean", type=float, nargs="+", default=None, metavar="MEAN", help="Override mean pixel value of dataset", ) parser.add_argument( "--std", type=float, nargs="+", default=None, metavar="STD", help="Override std deviation of of dataset", ) parser.add_argument( "--interpolation", default="", type=str, metavar="NAME", help="Image resize interpolation type (overrides model)", ) parser.add_argument( "--num-classes", type=int, default=None, help="Number classes in dataset" ) parser.add_argument( "--class-map", default="", type=str, metavar="FILENAME", help='path to class to idx mapping file (default: "")', ) parser.add_argument( "--gp", default=None, type=str, metavar="POOL", help="Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.", ) parser.add_argument( "--log-freq", default=10, type=int, metavar="N", help="batch logging frequency (default: 10)", ) parser.add_argument( "--checkpoint", default="", type=str, metavar="PATH", help="path to latest checkpoint (default: none)", ) parser.add_argument( "--pretrained", dest="pretrained", action="store_true", help="use pre-trained model" ) parser.add_argument("--num-gpu", type=int, default=1, help="Number of GPUS to use") parser.add_argument( "--test-pool", dest="test_pool", action="store_true", help="enable test time pool" ) parser.add_argument( "--no-prefetcher", action="store_true", default=False, help="disable fast prefetcher", ) parser.add_argument( "--pin-mem", action="store_true", default=False, help="Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.", ) parser.add_argument( "--channels-last", action="store_true", default=False, help="Use channels_last memory layout", ) parser.add_argument( "--amp", action="store_true", default=False, help="Use AMP mixed precision. Defaults to Apex, fallback to native Torch AMP.", ) parser.add_argument( "--apex-amp", action="store_true", default=False, help="Use NVIDIA Apex AMP mixed precision", ) parser.add_argument( "--native-amp", action="store_true", default=False, help="Use Native Torch AMP mixed precision", ) parser.add_argument( "--tf-preprocessing", action="store_true", default=False, help="Use Tensorflow preprocessing pipeline (require CPU TF installed", ) parser.add_argument( "--use-ema", dest="use_ema", action="store_true", help="use ema version of weights if present", ) parser.add_argument( "--torchscript", dest="torchscript", action="store_true", help="convert model torchscript for inference", ) parser.add_argument( "--fuser", default="", type=str, help="Select jit fuser. 
One of ('', 'te', 'old', 'nvfuser')", ) parser.add_argument( "--results-file", default="", type=str, metavar="FILENAME", help="Output csv file for validation results (summary)", ) parser.add_argument( "--real-labels", default="", type=str, metavar="FILENAME", help="Real labels JSON file for imagenet evaluation", ) parser.add_argument( "--valid-labels", default="", type=str, metavar="FILENAME", help="Valid label indices txt file for validation of partial label space", ) def validate(args): # might as well try to validate something args.pretrained = args.pretrained or not args.checkpoint args.prefetcher = not args.no_prefetcher amp_autocast = suppress # do nothing if args.amp: if has_native_amp: args.native_amp = True elif has_apex: args.apex_amp = True else: _logger.warning("Neither APEX or Native Torch AMP is available.") assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set." if args.native_amp: amp_autocast = torch.cuda.amp.autocast _logger.info("Validating in mixed precision with native PyTorch AMP.") elif args.apex_amp: _logger.info("Validating in mixed precision with NVIDIA APEX AMP.") else: _logger.info("Validating in float32. AMP not enabled.") if args.fuser: set_jit_fuser(args.fuser) # create model model = create_model( args.model, pretrained=args.pretrained, num_classes=args.num_classes, in_chans=3, global_pool=args.gp, scriptable=args.torchscript, ) if args.num_classes is None: assert hasattr( model, "num_classes" ), "Model must have `num_classes` attr if not set on cmd line/config." args.num_classes = model.num_classes if args.checkpoint: load_checkpoint(model, args.checkpoint, args.use_ema) param_count = sum([m.numel() for m in model.parameters()]) _logger.info("Model %s created, param count: %d" % (args.model, param_count)) data_config = resolve_data_config( vars(args), model=model, use_test_size=True, verbose=True ) dllogger_dir = os.path.dirname(args.dllogger_name) if dllogger_dir and not os.path.exists(dllogger_dir): os.makedirs(dllogger_dir, exist_ok=True) log_path = args.dllogger_name original_log_path = log_path if os.path.exists(log_path): for i in itertools.count(): s_fname = original_log_path.split('.') log_path = '.'.join(s_fname[:-1]) + f'_{i}.' 
+ s_fname[-1] if not os.path.exists(log_path): break dllogger.init( backends=[ dllogger.JSONStreamBackend(verbosity=1, filename=log_path), dllogger.StdOutBackend(verbosity=0), ] ) dllogger.metadata("top1", {"unit": None}) dllogger.metadata("top5", {"unit": None}) dllogger.metadata("average_ips", {"unit": "images/s"}) test_time_pool = False if args.test_pool: model, test_time_pool = apply_test_time_pool( model, data_config, use_test_size=True ) if args.torchscript: torch.jit.optimized_execution(True) model = torch.jit.script(model) model = model.cuda() if args.apex_amp: model = amp.initialize(model, opt_level="O1") if args.channels_last: model = model.to(memory_format=torch.channels_last) if args.num_gpu > 1: model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))) criterion = nn.CrossEntropyLoss().cuda() dataset = create_dataset( root=args.data, name=args.dataset, split=args.split, download=args.dataset_download, load_bytes=args.tf_preprocessing, class_map=args.class_map, ) if args.valid_labels: with open(args.valid_labels, "r") as f: valid_labels = {int(line.rstrip()) for line in f} valid_labels = [i in valid_labels for i in range(args.num_classes)] else: valid_labels = None if args.real_labels: real_labels = RealLabelsImagenet( dataset.filenames(basename=True), real_json=args.real_labels ) else: real_labels = None crop_pct = 1.0 if test_time_pool else data_config["crop_pct"] loader = create_loader( dataset, input_size=data_config["input_size"], batch_size=args.batch_size, use_prefetcher=args.prefetcher, interpolation=data_config["interpolation"], mean=data_config["mean"], std=data_config["std"], num_workers=args.workers, crop_pct=crop_pct, pin_memory=args.pin_mem, tf_preprocessing=args.tf_preprocessing, ) batch_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() top5 = AverageMeter() model.eval() with torch.no_grad(): # warmup, reduce variability of first batch time, especially for comparing torchscript vs non input = torch.randn( (args.batch_size,) + tuple(data_config["input_size"]) ).cuda() if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) model(input) end = time.time() for batch_idx, (input, target) in enumerate(loader): if args.no_prefetcher: target = target.cuda() input = input.cuda() if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) # compute output with amp_autocast(): output = model(input) if valid_labels is not None: output = output[:, valid_labels] loss = criterion(output, target) if real_labels is not None: real_labels.add_result(output) # measure accuracy and record loss acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5)) losses.update(loss.item(), input.size(0)) top1.update(acc1.item(), input.size(0)) top5.update(acc5.item(), input.size(0)) # measure elapsed time batch_time.update(time.time() - end) end = time.time() if batch_idx % args.log_freq == 0: _logger.info( "Test: [{0:>4d}/{1}] " "Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) " "Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) " "Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) " "Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})".format( batch_idx, len(loader), batch_time=batch_time, rate_avg=input.size(0) / batch_time.avg, loss=losses, top1=top1, top5=top5, ) ) if real_labels is not None: # real labels mode replaces topk values at the end top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5) else: top1a, top5a = top1.avg, top5.avg results = OrderedDict( model=args.model, 
top1=round(top1a, 4), top1_err=round(100 - top1a, 4), top5=round(top5a, 4), top5_err=round(100 - top5a, 4), param_count=round(param_count / 1e6, 2), img_size=data_config["input_size"][-1], cropt_pct=crop_pct, interpolation=data_config["interpolation"], average_ips = len(dataset)/batch_time.sum ) _logger.info( " * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})".format( results["top1"], results["top1_err"], results["top5"], results["top5_err"] ) ) return results def _try_run(args, initial_batch_size): batch_size = initial_batch_size results = OrderedDict() error_str = "Unknown" while batch_size >= 1: args.batch_size = batch_size torch.cuda.empty_cache() try: results = validate(args) return results except RuntimeError as e: error_str = str(e) if "channels_last" in error_str: break _logger.warning( f'"{error_str}" while running validation. Reducing batch size to {batch_size} for retry.' ) batch_size = batch_size // 2 results["error"] = error_str _logger.error(f"{args.model} failed to validate ({error_str}).") return results def main(): setup_default_logging() args = parser.parse_args() model_cfgs = [] model_names = [] if os.path.isdir(args.checkpoint): # validate all checkpoints in a path with same model checkpoints = glob.glob(args.checkpoint + "/*.pth.tar") checkpoints += glob.glob(args.checkpoint + "/*.pth") model_names = list_models(args.model) model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)] else: if args.model == "all": # validate all models in a list of names with pretrained checkpoints args.pretrained = True model_names = list_models( pretrained=True, exclude_filters=["*_in21k", "*_in22k", "*_dino"] ) model_cfgs = [(n, "") for n in model_names] elif not is_model(args.model): # model name doesn't exist, try as wildcard filter model_names = list_models(args.model) model_cfgs = [(n, "") for n in model_names] if not model_cfgs and os.path.isfile(args.model): with open(args.model) as f: model_names = [line.rstrip() for line in f] model_cfgs = [(n, None) for n in model_names if n] if len(model_cfgs): results_file = args.results_file or "./results-all.csv" _logger.info( "Running bulk validation on these pretrained models: {}".format( ", ".join(model_names) ) ) results = [] try: initial_batch_size = args.batch_size for m, c in model_cfgs: args.model = m args.checkpoint = c r = _try_run(args, initial_batch_size) if "error" in r: continue if args.checkpoint: r["checkpoint"] = args.checkpoint results.append(r) except KeyboardInterrupt as e: pass results = sorted(results, key=lambda x: x["top1"], reverse=True) if len(results): write_results(results_file, results) else: results = validate(args) dllogger.log(step=tuple(), data={"average_ips": results["average_ips"], "top1": results["top1"], "top5": results["top5"]}, verbosity=1) dllogger.flush() # output results in JSON to stdout w/ delimiter for runner script print(f"--result\n{json.dumps(results, indent=4)}") def write_results(results_file, results): with open(results_file, mode="w") as cf: dw = csv.DictWriter(cf, fieldnames=results[0].keys()) dw.writeheader() for r in results: dw.writerow(r) cf.flush() if __name__ == "__main__": main()
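Since the `@register_model` wrappers above add the GPUNet variants to timm's registry, they can also be instantiated directly once this module has been imported. A hedged sketch follows; the module name `validate` and the possibility that `get_configs` downloads the architecture JSON are assumptions:

```python
# Hypothetical interactive usage of the registered GPUNet variants.
import torch
from timm.models import create_model

import validate  # noqa: F401  # importing runs the @register_model decorators above

model = create_model("gpunet_0", pretrained=False).cuda().eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, model.imgRes, model.imgRes, device="cuda"))
print(out.shape)  # expected: torch.Size([1, 1000])
```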
PyTorch/LanguageModeling/BART/bart/configuration
configuration
configuration_bart
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The Fairseq Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BART configuration """ import logging from bart.configuration.configuration_utils import PretrainedConfig logger = logging.getLogger(__name__) BART_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/bart-base": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-base/config.json", "facebook/bart-large": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large/config.json", "facebook/bart-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-mnli/config.json", "facebook/bart-large-cnn": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/config.json", "facebook/bart-large-xsum": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-xsum/config.json", "facebook/mbart-large-en-ro": "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/mbart-large-en-ro/config.json", "yjernite/bart_eli5": "https://s3.amazonaws.com/models.huggingface.co/bert/yjernite/bart_eli5/config.json", } BART_CONFIG_ARGS_DOC = r""" Args: vocab_size (:obj:`int`, optional, defaults to 50265): defines the different tokens that can be represented by `inputs_ids` passed to the forward method. d_model (:obj:`int`, optional, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (:obj:`int`, optional, defaults to 12): Number of encoder layers, 16 for pegasus, 6 for bart-base and marian decoder_layers (:obj:`int`, optional, defaults to 12): Number of decoder layers, 16 for pegasus, 6 for bart-base and marian encoder_attention_heads (:obj:`int`, optional, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (:obj:`int`, optional, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (:obj:`int`, optional, defaults to 4096): Dimensionality of the "intermediate" (i.e., feed-forward) layer in decoder. encoder_ffn_dim (:obj:`int`, optional, defaults to 4096): Dimensionality of the "intermediate" (i.e., feed-forward) layer in decoder. activation_function (:obj:`str` or :obj:`function`, optional, defaults to "gelu"): The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported. dropout (:obj:`float`, optional, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (:obj:`float`, optional, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (:obj:`float`, optional, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (:obj:`float`, optional, defaults to 0.0): The dropout ratio for classifier. 
max_position_embeddings (:obj:`int`, optional, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (:obj:`float`, optional, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. add_bias_logits (:obj:`bool`, optional, defaults to False): True for marian only. normalize_before (:obj:`bool`, optional, defaults to False): Call layernorm before attention ops. True for pegasus, mbart. False for bart. FIXME: marian? normalize_embedding (:obj:`bool`, optional, defaults to True): Call layernorm after embeddings. Only True for Bart. static_position_embeddings (:obj:`bool`, optional, defaults to False): Don't learn positional embeddings, use sinusoidal. True for marian, pegasus. add_final_layer_norm (:obj:`bool`, optional, defaults to False): Add a final layer norm after the last encoder/decoder layer. scale_embedding (:obj:`bool`, optional, defaults to False): Scale embeddings by a factor of sqrt(d_model). eos_token_id (:obj:`int`, optional, defaults to 2): End of stream token id. pad_token_id (:obj:`int`, optional, defaults to 1): Padding token id. bos_token_id (:obj:`int`, optional, defaults to 0): Beginning of stream token id. encoder_layerdrop (:obj:`float`, optional, defaults to 0.0): LayerDrop probability for the encoder (see "Reducing Transformer Depth on Demand with Structured Dropout"). decoder_layerdrop (:obj:`float`, optional, defaults to 0.0): LayerDrop probability for the decoder (see "Reducing Transformer Depth on Demand with Structured Dropout"). num_labels (:obj:`int`, optional, defaults to 3): Number of labels to use for SequenceClassification. is_encoder_decoder (:obj:`bool`, optional, defaults to True): Whether the model is used as an encoder/decoder. force_bos_token_to_be_generated (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force BOS token to be generated at step 1 (after ``decoder_start_token_id``), only true for `bart-large-cnn`. """ class BartConfig(PretrainedConfig): r""" Configuration class for Bart. Parameters are renamed from the fairseq implementation. """ model_type = "bart" def __init__( self, activation_dropout=0.0, activation_function="gelu", vocab_size=50265, d_model=1024, encoder_ffn_dim=4096, encoder_layers=12, encoder_attention_heads=16, decoder_ffn_dim=4096, decoder_layers=12, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, attention_dropout=0.0, dropout=0.1, max_position_embeddings=1024, init_std=0.02, classifier_dropout=0.0, num_labels=3, is_encoder_decoder=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, normalize_before=False, add_final_layer_norm=False, scale_embedding=False, normalize_embedding=True, static_position_embeddings=False, add_bias_logits=False, force_bos_token_to_be_generated=False, attention_bias=True, **common_kwargs ): r""" :class:`~transformers.BartConfig` is the configuration class for `BartModel`. 
Examples:: >>> from transformers import BartConfig, BartModel >>> config = BartConfig.from_pretrained('facebook/bart-large') >>> model = BartModel(config) """ if "hidden_size" in common_kwargs: raise ValueError("hidden size is called d_model") super().__init__( num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **common_kwargs, ) self.vocab_size = vocab_size self.d_model = d_model # encoder_embed_dim and decoder_embed_dim self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = self.num_hidden_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.max_position_embeddings = max_position_embeddings self.init_std = init_std # Normal(0, this parameter) self.activation_function = activation_function # Params introduced for Mbart self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True self.normalize_embedding = normalize_embedding # True for mbart, False otherwise self.normalize_before = normalize_before # combo of fairseq's encoder_ and decoder_normalize_before self.add_final_layer_norm = add_final_layer_norm # Params introduced for Marian self.add_bias_logits = add_bias_logits self.static_position_embeddings = static_position_embeddings # 3 Types of Dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.dropout = dropout # Classifier stuff self.classif_dropout = classifier_dropout self.force_bos_token_to_be_generated = force_bos_token_to_be_generated self.attention_bias = attention_bias @property def num_attention_heads(self) -> int: return self.encoder_attention_heads @property def hidden_size(self) -> int: return self.d_model def is_valid_mbart(self) -> bool: """Is the configuration aligned with the MBART paper.""" if self.normalize_before and self.add_final_layer_norm and self.scale_embedding: return True if self.normalize_before or self.add_final_layer_norm or self.scale_embedding: logger.info("This configuration is a mixture of MBART and BART settings") return False
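Beyond loading a released checkpoint, the constructor arguments listed above can be used to build a scaled-down configuration directly. A small illustrative sketch (the values are arbitrary and do not correspond to any released model):

```python
# Illustrative only: a tiny BART configuration built from the constructor arguments above.
from bart.configuration.configuration_bart import BartConfig

tiny_cfg = BartConfig(
    vocab_size=50265,
    d_model=256,
    encoder_layers=3,
    decoder_layers=3,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
    encoder_ffn_dim=512,
    decoder_ffn_dim=512,
)
assert tiny_cfg.hidden_size == tiny_cfg.d_model  # property alias defined above
print(tiny_cfg.num_attention_heads)              # 4
print(tiny_cfg.is_valid_mbart())                 # False with the defaults above
```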
TensorFlow2/Recommendation/SIM/sim/data
data
dataloader
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import multiprocessing from functools import partial import tensorflow as tf from sim.data.defaults import (DIMENSIONS_SELECTOR, LABEL_CHANNEL, NEGATIVE_HISTORY_CHANNEL, POSITIVE_HISTORY_CHANNEL, TARGET_ITEM_FEATURES_CHANNEL, USER_FEATURES_CHANNEL, REMAINDER_FILENAME) def _remap_column_values_tfrecord(sample, feature_spec, long_seq_length): channel_spec = feature_spec.channel_spec features = feature_spec.feature_spec user_features = { f_name: tf.reshape(sample[f_name], [-1]) for f_name in channel_spec[USER_FEATURES_CHANNEL] } target_item_features = { f_name: tf.reshape(sample[f_name], [-1]) for f_name in channel_spec[TARGET_ITEM_FEATURES_CHANNEL] } padded_positive = { f_name: tf.reshape(sample[f_name], [-1, features[f_name][DIMENSIONS_SELECTOR][0]]) for f_name in channel_spec[POSITIVE_HISTORY_CHANNEL] } padded_negative = { f_name: tf.reshape(sample[f_name], [-1, features[f_name][DIMENSIONS_SELECTOR][0]]) for f_name in channel_spec[NEGATIVE_HISTORY_CHANNEL] } long_sequence_features = { f_name: val[:, :long_seq_length] for f_name, val in padded_positive.items() } short_sequence_features = { f_name: val[:, long_seq_length:] for f_name, val in padded_positive.items() } short_neg_sequence_features = { f_name: val[:, long_seq_length:] for f_name, val in padded_negative.items() } first_positive_feature_name = channel_spec[POSITIVE_HISTORY_CHANNEL][0] first_positive_feature = padded_positive[first_positive_feature_name] history_mask = tf.cast(tf.greater(first_positive_feature, 0), tf.float32) long_sequence_mask = history_mask[:, :long_seq_length] short_sequence_mask = history_mask[:, long_seq_length:] label_name = channel_spec[LABEL_CHANNEL][0] target = tf.reshape(sample[label_name], [-1]) return { "user_features": user_features, "target_item_features": target_item_features, "long_sequence_features": long_sequence_features, "short_sequence_features": short_sequence_features, "short_neg_sequence_features": short_neg_sequence_features, "long_sequence_mask": long_sequence_mask, "short_sequence_mask": short_sequence_mask, "other_features": None }, target def split_prebatch(sample, split_into): res = {} for f_name, val in sample.items(): res[f_name] = tf.reshape(val, [split_into, -1]) return tf.data.Dataset.from_tensor_slices(res) def get_dataloader_tfrecord( file_paths, feature_spec, batch_size, long_seq_length, num_gpus=1, id=0, drop_remainder=False, repeat_count=0, prefetch_buffer_size=90, num_parallel_calls=None, disable_cache=False, prebatch_size=0 ): features = feature_spec.feature_spec prebatched = prebatch_size > 0 remainder_file = None if file_paths[-1].name == REMAINDER_FILENAME: remainder_file = file_paths[-1:] file_paths = file_paths[:-1] tf_feature_spec = {} for name, feature in features.items(): dimensions = feature.get(DIMENSIONS_SELECTOR) if dimensions is None: dimensions = [1] if prebatched else [] if prebatched: dimensions = dimensions.copy() dimensions[0] *= prebatch_size 
tf_feature_spec[name] = tf.io.FixedLenFeature(dimensions, tf.int64) if num_parallel_calls is None: num_cpus = multiprocessing.cpu_count() num_parallel_calls = 4 * num_cpus // num_gpus dataset = tf.data.TFRecordDataset(file_paths, num_parallel_reads=num_parallel_calls) dataset = dataset.shard(num_gpus, id) splitting_function = None if prebatched: if batch_size >= prebatch_size: batch_size = batch_size // prebatch_size else: split_into = prebatch_size // batch_size splitting_function = partial(split_prebatch, split_into=split_into) batch_size = 1 dataset = dataset.batch( batch_size, drop_remainder=drop_remainder, num_parallel_calls=num_parallel_calls ) dataset = dataset.map( map_func=partial(tf.io.parse_example, features=tf_feature_spec), num_parallel_calls=num_parallel_calls ) if splitting_function is not None: dataset = dataset.flat_map(splitting_function) if not drop_remainder and id == 0 and remainder_file is not None: tf_feature_spec_remainder = { name: tf.io.RaggedFeature(tf.int64) for name in tf_feature_spec } remainder = tf.data.TFRecordDataset(remainder_file) remainder = remainder.map( map_func=partial(tf.io.parse_example, features=tf_feature_spec_remainder) ) dataset = dataset.concatenate(remainder) dataset = dataset.map( map_func=partial(_remap_column_values_tfrecord, feature_spec=feature_spec, long_seq_length=long_seq_length), num_parallel_calls=num_parallel_calls ) if repeat_count > 0: dataset = dataset.repeat( count=repeat_count ) if prefetch_buffer_size > 0: dataset = dataset.prefetch( buffer_size=prefetch_buffer_size ) if not disable_cache: dataset = dataset.cache() return dataset
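A self-contained illustration (not from the repository) of the `split_prebatch` helper above: one prebatched record holding four samples is split back into two slices of two samples each:

```python
# Demonstrates split_prebatch: reshape a prebatched record into `split_into` slices.
import tensorflow as tf
from sim.data.dataloader import split_prebatch

sample = {"item_id": tf.constant([1, 2, 3, 4], dtype=tf.int64)}  # hypothetical feature
for piece in split_prebatch(sample, split_into=2):
    print(piece["item_id"].numpy())  # [1 2] then [3 4]
```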
TensorFlow/Detection/SSD/models/research/object_detection/builders
builders
matcher_builder_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for matcher_builder.""" import tensorflow as tf from google.protobuf import text_format from object_detection.builders import matcher_builder from object_detection.matchers import argmax_matcher from object_detection.matchers import bipartite_matcher from object_detection.protos import matcher_pb2 class MatcherBuilderTest(tf.test.TestCase): def test_build_arg_max_matcher_with_defaults(self): matcher_text_proto = """ argmax_matcher { } """ matcher_proto = matcher_pb2.Matcher() text_format.Merge(matcher_text_proto, matcher_proto) matcher_object = matcher_builder.build(matcher_proto) self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher)) self.assertAlmostEqual(matcher_object._matched_threshold, 0.5) self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.5) self.assertTrue(matcher_object._negatives_lower_than_unmatched) self.assertFalse(matcher_object._force_match_for_each_row) def test_build_arg_max_matcher_without_thresholds(self): matcher_text_proto = """ argmax_matcher { ignore_thresholds: true } """ matcher_proto = matcher_pb2.Matcher() text_format.Merge(matcher_text_proto, matcher_proto) matcher_object = matcher_builder.build(matcher_proto) self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher)) self.assertEqual(matcher_object._matched_threshold, None) self.assertEqual(matcher_object._unmatched_threshold, None) self.assertTrue(matcher_object._negatives_lower_than_unmatched) self.assertFalse(matcher_object._force_match_for_each_row) def test_build_arg_max_matcher_with_non_default_parameters(self): matcher_text_proto = """ argmax_matcher { matched_threshold: 0.7 unmatched_threshold: 0.3 negatives_lower_than_unmatched: false force_match_for_each_row: true use_matmul_gather: true } """ matcher_proto = matcher_pb2.Matcher() text_format.Merge(matcher_text_proto, matcher_proto) matcher_object = matcher_builder.build(matcher_proto) self.assertTrue(isinstance(matcher_object, argmax_matcher.ArgMaxMatcher)) self.assertAlmostEqual(matcher_object._matched_threshold, 0.7) self.assertAlmostEqual(matcher_object._unmatched_threshold, 0.3) self.assertFalse(matcher_object._negatives_lower_than_unmatched) self.assertTrue(matcher_object._force_match_for_each_row) self.assertTrue(matcher_object._use_matmul_gather) def test_build_bipartite_matcher(self): matcher_text_proto = """ bipartite_matcher { } """ matcher_proto = matcher_pb2.Matcher() text_format.Merge(matcher_text_proto, matcher_proto) matcher_object = matcher_builder.build(matcher_proto) self.assertTrue( isinstance(matcher_object, bipartite_matcher.GreedyBipartiteMatcher)) def test_raise_error_on_empty_matcher(self): matcher_text_proto = """ """ matcher_proto = matcher_pb2.Matcher() text_format.Merge(matcher_text_proto, matcher_proto) with self.assertRaises(ValueError): matcher_builder.build(matcher_proto) if __name__ == 
'__main__': tf.test.main()
PyTorch/Translation/Transformer/fairseq/optim
optim
nag
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from torch.optim.optimizer import Optimizer, required from . import FairseqOptimizer, register_optimizer @register_optimizer('nag') class FairseqNAG(FairseqOptimizer): def __init__(self, args, params): super().__init__(args, params) self._optimizer = NAG(params, **self.optimizer_config) @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay, } class NAG(Optimizer): def __init__(self, params, lr=required, momentum=0, weight_decay=0): defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay) super(NAG, self).__init__(params, defaults) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] lr = group['lr'] lr_old = group.get('lr_old', lr) lr_correct = lr / lr_old for p in group['params']: if p.grad is None: continue d_p = p.grad.data param_state = self.state[p] if 'momentum_buffer' not in param_state: param_state['momentum_buffer'] = d_p.clone().zero_() buf = param_state['momentum_buffer'] if weight_decay != 0: p.data.mul_(1 - lr * weight_decay) p.data.add_(momentum * momentum * lr_correct, buf) p.data.add_(-(1 + momentum) * lr, d_p) buf.mul_(momentum * lr_correct).add_(-lr, d_p) group['lr_old'] = lr return loss
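A minimal usage sketch for the NAG class above, used directly outside the fairseq FairseqOptimizer wrapper (assuming a PyTorch version that still accepts the deprecated add_(scalar, tensor) overload used in step):

import torch
import torch.nn as nn
import torch.nn.functional as F

model = nn.Linear(10, 2)
optimizer = NAG(model.parameters(), lr=0.25, momentum=0.99, weight_decay=0.0)

x, y = torch.randn(8, 10), torch.randn(8, 2)
loss = F.mse_loss(model(x), y)
optimizer.zero_grad()
loss.backward()
optimizer.step()   # applies the Nesterov update with the lr / lr_old correction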
TensorFlow/Translation/GNMT/examples
examples
DGXA100_TF32_1GPU
python nmt.py --output_dir=results --batch_size=128 --learning_rate=5e-4
PyTorch/SpeechRecognition/wav2vec2/common
common
utils
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch import torch.distributed as dist def print_once(*msg, local_rank=0): """Single stdout print with multiple processes.""" if dist.is_initialized(): if dist.get_rank() == 0: print(*msg) elif int(os.environ.get('WORLD_SIZE', 1)) == 1: print(*msg) elif int(os.environ.get('RANK', 0)) == 0 and local_rank == 0: print(*msg) class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self def set_torch_seed(seed): torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) def reduce_tensor(tensor, world_size, mean=True): if world_size == 1: return tensor rt = tensor.clone() dist.all_reduce(rt, op=dist.ReduceOp.SUM) if mean: rt = rt.true_divide(world_size) return rt def all_reduce_cpu_scalars(data, device=torch.device('cuda')): data_keys = list(data.keys()) data_vals = list(data.values()) tensor_vals = torch.tensor(data_vals, dtype=torch.double, device=device) dist.all_reduce(tensor_vals, op=dist.ReduceOp.SUM) data_vals = tensor_vals.cpu().numpy() return dict(zip(data_keys, data_vals)) def setup_distributed(local_rank): multi_gpu = int(os.environ.get('WORLD_SIZE', 1)) > 1 if multi_gpu: torch.cuda.set_device(local_rank) dist.init_process_group(backend='nccl', init_method='env://') world_size = dist.get_world_size() print_once(f'Distributed training with {world_size} GPUs\n') else: world_size = 1 return world_size
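A minimal single-process sketch of the helpers above; it assumes the script is launched without torchrun, so WORLD_SIZE is unset and the non-distributed code paths are taken:

import torch

world_size = setup_distributed(local_rank=0)  # returns 1 outside a distributed launch
set_torch_seed(42)
print_once(f'world_size = {world_size}')

loss = torch.tensor(1.5)
loss = reduce_tensor(loss, world_size)        # no-op when world_size == 1
cfg = AttrDict(lr=1e-4, epochs=3)             # attribute-style access to dict keys
print_once('lr:', cfg.lr, 'loss:', loss.item())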
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/utils
utils
keras
import tensorflow as tf class KerasCallback(tf.keras.callbacks.Callback): """ Utility class that simplifies usage of Keras callback across different modes. """ def __init__(self): super().__init__() self._current_epoch = None def on_any_begin(self, mode, logs): pass def on_any_end(self, mode, logs): pass def on_any_epoch_begin(self, mode, epoch, logs): pass def on_any_epoch_end(self, mode, epoch, logs): pass def on_any_batch_begin(self, mode, epoch, batch, logs): pass def on_any_batch_end(self, mode, epoch, batch, logs): pass def on_train_begin(self, logs=None): self.on_any_begin('train', logs) def on_test_begin(self, logs=None): self.on_any_begin('test', logs) def on_predict_begin(self, logs=None): self.on_any_begin('predict', logs) def on_train_end(self, logs=None): self.on_any_end('train', logs) def on_test_end(self, logs=None): self.on_any_end('test', logs) def on_predict_end(self, logs=None): self.on_any_end('predict', logs) def on_epoch_begin(self, epoch, logs=None): self._current_epoch = epoch self.on_any_epoch_begin('train', epoch, logs) def on_epoch_end(self, epoch, logs=None): self.on_any_epoch_end('train', epoch, logs) self._current_epoch = None def on_train_batch_begin(self, batch, logs=None): self.on_any_batch_begin('train', self._current_epoch, batch, logs) def on_test_batch_begin(self, batch, logs=None): self.on_any_batch_begin('test', None, batch, logs) def on_predict_batch_begin(self, batch, logs=None): self.on_any_batch_begin('predict', None, batch, logs) def on_train_batch_end(self, batch, logs=None): self.on_any_batch_end('train', self._current_epoch, batch, logs) def on_test_batch_end(self, batch, logs=None): self.on_any_batch_end('test', None, batch, logs) def on_predict_batch_end(self, batch, logs=None): self.on_any_batch_end('predict', None, batch, logs)
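A minimal sketch of the intended usage: subclass KerasCallback once, override the unified on_any_* hooks, and reuse the same callback across fit/evaluate/predict (LoggingCallback below is a hypothetical example, not part of the repository):

import tensorflow as tf

class LoggingCallback(KerasCallback):
    def on_any_epoch_end(self, mode, epoch, logs):
        print(f'[{mode}] epoch {epoch} ended, logs={logs}')

    def on_any_batch_end(self, mode, epoch, batch, logs):
        if batch % 100 == 0:
            print(f'[{mode}] epoch {epoch} batch {batch}')

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='sgd', loss='mse')
x, y = tf.random.normal((32, 4)), tf.random.normal((32, 1))
model.fit(x, y, epochs=1, callbacks=[LoggingCallback()], verbose=0)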
PyTorch/Detection/SSD/ssd
ssd
logger
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import numpy as np import dllogger as DLLogger class EpochMeter: def __init__(self, name): self.name = name self.data = [] def update(self, epoch, val): self.data.append((epoch, val)) class IterationMeter: def __init__(self, name): self.name = name self.data = [] def update(self, epoch, iteration, val): self.data.append((epoch, iteration, val)) class IterationAverageMeter: def __init__(self, name): self.name = name self.data = [] self.n = 0 self.sum = 0 def update_iter(self, val): if math.isfinite(val): # sometimes loss === 'inf' self.n += 1 self.sum += 0 if math.isinf(val) else val def update_epoch(self, epoch): self.data.append((epoch, self.sum / self.n)) self.n = 0 self.sum = 0 class Logger: def __init__(self, name, json_output=None, log_interval=20): self.name = name self.train_loss_logger = IterationAverageMeter("Training loss") self.train_epoch_time_logger = EpochMeter("Training 1 epoch time") self.val_acc_logger = EpochMeter("Validation accuracy") self.log_interval = log_interval backends = [ DLLogger.StdOutBackend(DLLogger.Verbosity.DEFAULT) ] if json_output: backends.append(DLLogger.JSONStreamBackend(DLLogger.Verbosity.VERBOSE, json_output)) DLLogger.init(backends) DLLogger.metadata("mAP", {"unit": None}) self.epoch = 0 self.train_iter = 0 self.summary = {} def step(self): return ( self.epoch, self.train_iter, ) def log_params(self, data): DLLogger.log("PARAMETER", data) DLLogger.flush() def log(self, key, value): DLLogger.log(self.step(), { key: value }) DLLogger.flush() def add_to_summary(self, data): for key, value in data.items(): self.summary[key] = value def log_summary(self): DLLogger.log((), self.summary) def update_iter(self, epoch, iteration, loss): self.epoch = epoch self.train_iter = iteration self.train_loss_logger.update_iter(loss) if iteration % self.log_interval == 0: self.log('loss', loss) def update_epoch(self, epoch, acc): self.epoch = epoch self.train_loss_logger.update_epoch(epoch) self.val_acc_logger.update(epoch, acc) data = { 'mAP': acc } self.add_to_summary(data) DLLogger.log((self.epoch,), data) def update_epoch_time(self, epoch, time): self.epoch = epoch self.train_epoch_time_logger.update(epoch, time) DLLogger.log((self.epoch,), { 'time': time }) def print_results(self): return self.train_loss_logger.data, self.val_acc_logger.data, self.train_epoch_time_logger class BenchmarkMeter: def __init__(self, name): self.name = name self.data = [] self.total_images = 0 self.total_time = 0 self.avr_images_per_second = 0 def update(self, bs, time): self.total_images += bs self.total_time += time self.avr_images_per_second = self.total_images / self.total_time self.data.append(bs / time) class BenchLogger(Logger): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.images_per_ses = BenchmarkMeter(self.name) DLLogger.metadata("avg_img/sec", {"unit": "images/s"}) DLLogger.metadata("med_img/sec", {"unit": "images/s"}) 
DLLogger.metadata("min_img/sec", {"unit": "images/s"}) DLLogger.metadata("max_img/sec", {"unit": "images/s"}) def update(self, bs, time): self.images_per_ses.update(bs, time) def print_result(self): total_bs = self.images_per_ses.total_images total_time = self.images_per_ses.total_time avr = self.images_per_ses.avr_images_per_second data = np.array(self.images_per_ses.data) med = np.median(data) DLLogger.log((), { 'avg_img/sec': avr, 'med_img/sec': np.median(data), 'min_img/sec': np.min(data), 'max_img/sec': np.max(data), }) print("Done benchmarking. Total images: {}\ttotal time: {:.3f}\tAverage images/sec: {:.3f}\tMedian images/sec: {:.3f}".format( total_bs, total_time, avr, med )) return med
TensorFlow/LanguageModeling/Transformer-XL
Transformer-XL
prep_text8
#!/usr/bin/env python # coding=utf-8 # BSD 3-Clause License # # Copyright (c) 2017, # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import sys import zipfile from io import open if os.path.exists('train.txt'): print('Tokenized text8 already exists - skipping processing') sys.exit() zipfile.ZipFile('text8.zip').extractall() data = open('text8', 'r', encoding='utf-8').read() print('Length of text8: {}'.format(len(data))) num_test_chars = 5000000 train_data = data[: -2 * num_test_chars] valid_data = data[-2 * num_test_chars: -num_test_chars] test_data = data[-num_test_chars:] for fn, part in [('train.txt', train_data), ('valid.txt', valid_data), ('test.txt', test_data)]: print('{} will have {} bytes'.format(fn, len(part))) print('- Tokenizing...') # Change space ' ' to underscore '_' part_str = ' '.join(['_' if c == ' ' else c for c in part.strip()]) print('- Writing...') with open(fn, 'w', encoding='utf-8') as f: f.write(part_str) with open(fn + '.raw', 'w', encoding='utf-8') as f: f.write(part)
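A minimal sketch of consuming the script's output: read a tokenized split back and undo the space-to-underscore substitution:

with open('train.txt', 'r', encoding='utf-8') as f:
    tokens = f.read().split(' ')                    # one character-level token per entry
text = ''.join(' ' if t == '_' else t for t in tokens)
print('characters in the train split:', len(tokens))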
TensorFlow2/Segmentation/MaskRCNN/mrcnn_tf2/object_detection
object_detection
balanced_positive_negative_sampler
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Class to subsample minibatches by balancing positives and negatives. Subsamples minibatches based on a pre-specified positive fraction in range [0,1]. The class presumes there are many more negatives than positive examples: if the desired batch_size cannot be achieved with the pre-specified positive fraction, it fills the rest with negative examples. If this is not sufficient for obtaining the desired batch_size, it returns fewer examples. The main function to call is Subsample(self, indicator, labels). For convenience one can also call SubsampleWeights(self, weights, labels) which is defined in the minibatch_sampler base class. When is_static is True, it implements a method that guarantees static shapes. It also ensures the length of output of the subsample is always batch_size, even when number of examples set to True in indicator is less than batch_size. This is originally implemented in TensorFlow Object Detection API. """ import tensorflow as tf from mrcnn_tf2.object_detection import minibatch_sampler, ops class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler): """Subsamples minibatches to a desired balance of positives and negatives.""" def __init__(self, positive_fraction=0.5, is_static=False): """Constructs a minibatch sampler. Args: positive_fraction: desired fraction of positive examples (scalar in [0,1]) in the batch. is_static: If True, uses an implementation with static shape guarantees. Raises: ValueError: if positive_fraction < 0, or positive_fraction > 1 """ if positive_fraction < 0 or positive_fraction > 1: raise ValueError('positive_fraction should be in range [0,1]. ' 'Received: %s.' % positive_fraction) self._positive_fraction = positive_fraction self._is_static = is_static def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size): """Counts the number of positives and negatives numbers to be sampled. Args: sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains the signed indices of the examples where the sign is based on the label value. The examples that cannot be sampled are set to 0. It samples atmost sample_size*positive_fraction positive examples and remaining from negative examples. sample_size: Size of subsamples. Returns: A tuple containing the number of positive and negative labels in the subsample. 
""" input_length = tf.shape(input=sorted_indices_tensor)[0] valid_positive_index = tf.greater(sorted_indices_tensor, tf.zeros(input_length, tf.int32)) num_sampled_pos = tf.reduce_sum(input_tensor=tf.cast(valid_positive_index, tf.int32)) max_num_positive_samples = tf.constant( int(sample_size * self._positive_fraction), tf.int32) num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos) num_negative_samples = tf.constant(sample_size, tf.int32) - num_positive_samples return num_positive_samples, num_negative_samples def _get_values_from_start_and_end(self, input_tensor, num_start_samples, num_end_samples, total_num_samples): """slices num_start_samples and last num_end_samples from input_tensor. Args: input_tensor: An int32 tensor of shape [N] to be sliced. num_start_samples: Number of examples to be sliced from the beginning of the input tensor. num_end_samples: Number of examples to be sliced from the end of the input tensor. total_num_samples: Sum of is num_start_samples and num_end_samples. This should be a scalar. Returns: A tensor containing the first num_start_samples and last num_end_samples from input_tensor. """ input_length = tf.shape(input=input_tensor)[0] start_positions = tf.less(tf.range(input_length), num_start_samples) end_positions = tf.greater_equal( tf.range(input_length), input_length - num_end_samples) selected_positions = tf.logical_or(start_positions, end_positions) selected_positions = tf.cast(selected_positions, tf.float32) indexed_positions = tf.multiply(tf.cumsum(selected_positions), selected_positions) one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1, total_num_samples, dtype=tf.float32) return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32), one_hot_selector, axes=[0, 0]), tf.int32) def _static_subsample(self, indicator, batch_size, labels): """Returns subsampled minibatch. Args: indicator: boolean tensor of shape [N] whose True entries can be sampled. N should be a complie time constant. batch_size: desired batch size. This scalar cannot be None. labels: boolean tensor of shape [N] denoting positive(=True) and negative (=False) examples. N should be a complie time constant. Returns: sampled_idx_indicator: boolean tensor of shape [N], True for entries which are sampled. It ensures the length of output of the subsample is always batch_size, even when number of examples set to True in indicator is less than batch_size. Raises: ValueError: if labels and indicator are not 1D boolean tensors. """ # Check if indicator and labels have a static size. if not indicator.shape.is_fully_defined(): raise ValueError('indicator must be static in shape when is_static is' 'True') if not labels.shape.is_fully_defined(): raise ValueError('labels must be static in shape when is_static is' 'True') if not isinstance(batch_size, int): raise ValueError('batch_size has to be an integer when is_static is' 'True.') input_length = tf.shape(input=indicator)[0] # Set the number of examples set True in indicator to be at least # batch_size. num_true_sampled = tf.reduce_sum(input_tensor=tf.cast(indicator, tf.float32)) additional_false_sample = tf.less_equal( tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)), batch_size - num_true_sampled) indicator = tf.logical_or(indicator, additional_false_sample) # Shuffle indicator and label. Need to store the permutation to restore the # order post sampling. 
permutation = tf.random.shuffle(tf.range(input_length)) indicator = ops.matmul_gather_on_zeroth_axis( tf.cast(indicator, tf.float32), permutation) labels = ops.matmul_gather_on_zeroth_axis( tf.cast(labels, tf.float32), permutation) # index (starting from 1) when indicator is True, 0 when False indicator_idx = tf.where( tf.cast(indicator, tf.bool), tf.range(1, input_length + 1), tf.zeros(input_length, tf.int32)) # Replace -1 for negative, +1 for positive labels signed_label = tf.where( tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32), tf.scalar_mul(-1, tf.ones(input_length, tf.int32))) # negative of index for negative label, positive index for positive label, # 0 when indicator is False. signed_indicator_idx = tf.multiply(indicator_idx, signed_label) sorted_signed_indicator_idx = tf.nn.top_k( signed_indicator_idx, input_length, sorted=True).values [num_positive_samples, num_negative_samples] = self._get_num_pos_neg_samples( sorted_signed_indicator_idx, batch_size) sampled_idx = self._get_values_from_start_and_end( sorted_signed_indicator_idx, num_positive_samples, num_negative_samples, batch_size) # Shift the indices to start from 0 and remove any samples that are set as # False. sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32) sampled_idx = tf.multiply( tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32), sampled_idx) sampled_idx_indicator = tf.cast(tf.reduce_sum( input_tensor=tf.one_hot(sampled_idx, depth=input_length), axis=0), tf.bool) # project back the order based on stored permutations reprojections = tf.one_hot(permutation, depth=input_length, dtype=tf.float32) return tf.cast(tf.tensordot( tf.cast(sampled_idx_indicator, tf.float32), reprojections, axes=[0, 0]), tf.bool) def subsample(self, indicator, batch_size, labels, scope=None): """Returns subsampled minibatch. Args: indicator: boolean tensor of shape [N] whose True entries can be sampled. batch_size: desired batch size. If None, keeps all positive samples and randomly selects negative samples so that the positive sample fraction matches self._positive_fraction. It cannot be None is is_static is True. labels: boolean tensor of shape [N] denoting positive(=True) and negative (=False) examples. scope: name scope. Returns: sampled_idx_indicator: boolean tensor of shape [N], True for entries which are sampled. Raises: ValueError: if labels and indicator are not 1D boolean tensors. """ if len(indicator.get_shape().as_list()) != 1: raise ValueError('indicator must be 1 dimensional, got a tensor of ' 'shape %s' % indicator.get_shape()) if len(labels.get_shape().as_list()) != 1: raise ValueError('labels must be 1 dimensional, got a tensor of ' 'shape %s' % labels.get_shape()) if labels.dtype != tf.bool: raise ValueError('labels should be of type bool. Received: %s' % labels.dtype) if indicator.dtype != tf.bool: raise ValueError('indicator should be of type bool. 
Received: %s' % indicator.dtype) if self._is_static: return self._static_subsample(indicator, batch_size, labels) else: # Only sample from indicated samples negative_idx = tf.logical_not(labels) positive_idx = tf.logical_and(labels, indicator) negative_idx = tf.logical_and(negative_idx, indicator) # Sample positive and negative samples separately if batch_size is None: max_num_pos = tf.reduce_sum(input_tensor=tf.cast(positive_idx, dtype=tf.int32)) else: max_num_pos = int(self._positive_fraction * batch_size) sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos) num_sampled_pos = tf.reduce_sum(input_tensor=tf.cast(sampled_pos_idx, tf.int32)) if batch_size is None: negative_positive_ratio = ( 1 - self._positive_fraction) / self._positive_fraction max_num_neg = tf.cast( negative_positive_ratio * tf.cast(num_sampled_pos, dtype=tf.float32), dtype=tf.int32) else: max_num_neg = batch_size - num_sampled_pos sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg) return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
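A minimal sketch of the dynamic (is_static=False) path of the sampler above, using toy anchors; it assumes subsample_indicator from the minibatch_sampler base class behaves as in the upstream Object Detection API:

import tensorflow as tf

indicator = tf.constant([True] * 100)                  # every anchor may be sampled
labels = tf.constant([True] * 10 + [False] * 90)       # 10 positives, 90 negatives
sampler = BalancedPositiveNegativeSampler(positive_fraction=0.5)
sampled = sampler.subsample(indicator, 32, labels)     # boolean mask of shape [100]
print(int(tf.reduce_sum(tf.cast(sampled, tf.int32))))  # 32: 10 positives + 22 negatives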
TensorFlow2/Recommendation/WideAndDeep/tests/feature_specs
feature_specs
no_onehot
channel_spec: label: - clicked map: [] multihot_categorical: - topic_id_list - entity_id_list - category_id_list numerical: - document_id_document_id_promo_sim_categories - document_id_document_id_promo_sim_topics - document_id_document_id_promo_sim_entities - document_id_promo_ctr - publisher_id_promo_ctr - source_id_promo_ctr - document_id_promo_count - publish_time_days_since_published - ad_id_ctr - advertiser_id_ctr - campaign_id_ctr - ad_id_count - publish_time_promo_days_since_published onehot_categorical: [] feature_spec: ad_id_count: {} ad_id_ctr: {} advertiser_id_ctr: {} campaign_id_ctr: {} category_id_list: cardinality: 100 max_hotness: 3 clicked: {} document_id_document_id_promo_sim_categories: {} document_id_document_id_promo_sim_entities: {} document_id_document_id_promo_sim_topics: {} document_id_promo_count: {} document_id_promo_ctr: {} entity_id_list: cardinality: 10000 max_hotness: 3 publish_time_days_since_published: {} publish_time_promo_days_since_published: {} publisher_id_promo_ctr: {} source_id_promo_ctr: {} topic_id_list: cardinality: 350 max_hotness: 3 metadata: {} source_spec: test: - features: - clicked - topic_id_list - entity_id_list - category_id_list - document_id_document_id_promo_sim_categories - document_id_document_id_promo_sim_topics - document_id_document_id_promo_sim_entities - document_id_promo_ctr - publisher_id_promo_ctr - source_id_promo_ctr - document_id_promo_count - publish_time_days_since_published - ad_id_ctr - advertiser_id_ctr - campaign_id_ctr - ad_id_count - publish_time_promo_days_since_published files: - valid.csv type: csv train: - features: - clicked - topic_id_list - entity_id_list - category_id_list - document_id_document_id_promo_sim_categories - document_id_document_id_promo_sim_topics - document_id_document_id_promo_sim_entities - document_id_promo_ctr - publisher_id_promo_ctr - source_id_promo_ctr - document_id_promo_count - publish_time_days_since_published - ad_id_ctr - advertiser_id_ctr - campaign_id_ctr - ad_id_count - publish_time_promo_days_since_published files: - train.csv type: csv
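A minimal sketch (assuming the spec above is saved as feature_spec.yaml and PyYAML is available) of inspecting the channel assignments and per-feature cardinalities:

import yaml

with open('feature_spec.yaml') as f:
    spec = yaml.safe_load(f)

for channel, features in spec['channel_spec'].items():
    print(channel, features)

for name, meta in spec['feature_spec'].items():
    if 'cardinality' in meta:
        print(name, 'cardinality =', meta['cardinality'], 'max_hotness =', meta.get('max_hotness'))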
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
ssd_inception_v3_pets
# SSD with Inception v2 configured for Oxford-IIIT Pets Dataset. # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. model { ssd { num_classes: 37 box_coder { faster_rcnn_box_coder { y_scale: 10.0 x_scale: 10.0 height_scale: 5.0 width_scale: 5.0 } } matcher { argmax_matcher { matched_threshold: 0.5 unmatched_threshold: 0.5 ignore_thresholds: false negatives_lower_than_unmatched: true force_match_for_each_row: true } } similarity_calculator { iou_similarity { } } anchor_generator { ssd_anchor_generator { num_layers: 6 min_scale: 0.2 max_scale: 0.95 aspect_ratios: 1.0 aspect_ratios: 2.0 aspect_ratios: 0.5 aspect_ratios: 3.0 aspect_ratios: 0.3333 reduce_boxes_in_lowest_layer: true } } image_resizer { fixed_shape_resizer { height: 300 width: 300 } } box_predictor { convolutional_box_predictor { min_depth: 0 max_depth: 0 num_layers_before_predictor: 0 use_dropout: false dropout_keep_probability: 0.8 kernel_size: 3 box_code_size: 4 apply_sigmoid_to_scores: false conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.03 mean: 0.0 } } } } } feature_extractor { type: 'ssd_inception_v3' min_depth: 16 depth_multiplier: 1.0 conv_hyperparams { activation: RELU_6, regularizer { l2_regularizer { weight: 0.00004 } } initializer { truncated_normal_initializer { stddev: 0.1 mean: 0.0 } } batch_norm { train: true, scale: true, center: true, decay: 0.9997, epsilon: 0.01, } } override_base_feature_extractor_hyperparams: true } loss { classification_loss { weighted_sigmoid { } } localization_loss { weighted_smooth_l1 { } } hard_example_miner { num_hard_examples: 3000 iou_threshold: 0.99 loss_type: CLASSIFICATION max_negatives_per_positive: 3 min_negatives_per_image: 0 } classification_weight: 1.0 localization_weight: 1.0 } normalize_loss_by_num_matches: true post_processing { batch_non_max_suppression { score_threshold: 1e-8 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 100 } score_converter: SIGMOID } } } train_config: { batch_size: 24 optimizer { rms_prop_optimizer: { learning_rate: { exponential_decay_learning_rate { initial_learning_rate: 0.004 decay_steps: 800720 decay_factor: 0.95 } } momentum_optimizer_value: 0.9 decay: 0.9 epsilon: 1.0 } } fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" from_detection_checkpoint: true load_all_detection_checkpoint_vars: true # Note: The below line limits the training process to 200K steps, which we # empirically found to be sufficient enough to train the pets dataset. This # effectively bypasses the learning rate schedule (the learning rate will # never decay). Remove the below line to train indefinitely. num_steps: 200000 data_augmentation_options { random_horizontal_flip { } } data_augmentation_options { ssd_random_crop { } } } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/pet_faces_train.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt" } eval_config: { metrics_set: "coco_detection_metrics" num_examples: 1101 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/pet_faces_val.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/pet_label_map.pbtxt" shuffle: false num_readers: 1 }
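A minimal sketch (assuming the TensorFlow Object Detection API from this repository is importable and the config above is saved as ssd_inception_v3_pets.config) of parsing the pipeline file and reading a few fields:

import tensorflow as tf
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2

pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
with tf.gfile.GFile('ssd_inception_v3_pets.config', 'r') as f:
    text_format.Merge(f.read(), pipeline_config)

print(pipeline_config.model.ssd.num_classes)    # 37
print(pipeline_config.train_config.batch_size)  # 24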
TensorFlow/Recommendation/WideAndDeep
WideAndDeep
requirements
git+https://github.com/NVIDIA/dllogger@v1.0.0#egg=dllogger ipdb==0.13.9 pyspark==3.1.3
PyTorch/Detection/Efficientdet/effdet/object_detection
object_detection
region_similarity_calculator
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Region Similarity Calculators for BoxLists. Region Similarity Calculators compare a pairwise measure of similarity between the boxes in two BoxLists. """ import torch from .box_list import BoxList def area(boxlist: BoxList): """Computes area of boxes. Args: boxlist: BoxList holding N boxes Returns: a tensor with shape [N] representing box areas. """ y_min, x_min, y_max, x_max = boxlist.boxes().chunk(4, dim=1) out = (y_max - y_min).squeeze(1) * (x_max - x_min).squeeze(1) return out def intersection(boxlist1: BoxList, boxlist2: BoxList): """Compute pairwise intersection areas between boxes. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes Returns: a tensor with shape [N, M] representing pairwise intersections """ y_min1, x_min1, y_max1, x_max1 = boxlist1.boxes().chunk(4, dim=1) y_min2, x_min2, y_max2, x_max2 = boxlist2.boxes().chunk(4, dim=1) all_pairs_min_ymax = torch.min(y_max1, y_max2.t()) all_pairs_max_ymin = torch.max(y_min1, y_min2.t()) intersect_heights = torch.clamp(all_pairs_min_ymax - all_pairs_max_ymin, min=0) all_pairs_min_xmax = torch.min(x_max1, x_max2.t()) all_pairs_max_xmin = torch.max(x_min1, x_min2.t()) intersect_widths = torch.clamp(all_pairs_min_xmax - all_pairs_max_xmin, min=0) return intersect_heights * intersect_widths def iou(boxlist1: BoxList, boxlist2: BoxList): """Computes pairwise intersection-over-union between box collections. Args: boxlist1: BoxList holding N boxes boxlist2: BoxList holding M boxes Returns: a tensor with shape [N, M] representing pairwise iou scores. """ intersections = intersection(boxlist1, boxlist2) areas1 = area(boxlist1) areas2 = area(boxlist2) unions = areas1.unsqueeze(1) + areas2.unsqueeze(0) - intersections return torch.where(intersections == 0.0, torch.zeros_like(intersections), intersections / unions) @torch.jit.script class IouSimilarity(object): """Class to compute similarity based on Intersection over Union (IOU) metric. This class computes pairwise similarity between two BoxLists based on IOU. """ def __init__(self): pass def compare(self, boxlist1: BoxList, boxlist2: BoxList): """Computes matrix of pairwise similarity between BoxLists. This op (to be overridden) computes a measure of pairwise similarity between the boxes in the given BoxLists. 
Higher values indicate more similarity. Note that this method simply measures similarity and does not explicitly perform a matching. Args: boxlist1: BoxList holding N boxes. boxlist2: BoxList holding M boxes. Returns: a (float32) tensor of shape [N, M] with pairwise similarity score. """ return iou(boxlist1, boxlist2)
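A minimal sketch of computing pairwise IoU with the helpers above; it assumes BoxList from the accompanying box_list module wraps an [N, 4] float tensor of [ymin, xmin, ymax, xmax] boxes:

import torch

boxes_a = BoxList(torch.tensor([[0.0, 0.0, 1.0, 1.0],
                                [0.0, 0.0, 0.5, 0.5]]))
boxes_b = BoxList(torch.tensor([[0.0, 0.0, 1.0, 1.0]]))
print(iou(boxes_a, boxes_b))  # tensor([[1.0000], [0.2500]])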
TensorFlow/Detection/SSD/models/research/object_detection/metrics
metrics
coco_tools_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow_model.object_detection.metrics.coco_tools.""" import json import os import re import numpy as np from pycocotools import mask import tensorflow as tf from object_detection.metrics import coco_tools class CocoToolsTest(tf.test.TestCase): def setUp(self): groundtruth_annotations_list = [ { 'id': 1, 'image_id': 'first', 'category_id': 1, 'bbox': [100., 100., 100., 100.], 'area': 100.**2, 'iscrowd': 0 }, { 'id': 2, 'image_id': 'second', 'category_id': 1, 'bbox': [50., 50., 50., 50.], 'area': 50.**2, 'iscrowd': 0 }, ] image_list = [{'id': 'first'}, {'id': 'second'}] category_list = [{'id': 0, 'name': 'person'}, {'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}] self._groundtruth_dict = { 'annotations': groundtruth_annotations_list, 'images': image_list, 'categories': category_list } self._detections_list = [ { 'image_id': 'first', 'category_id': 1, 'bbox': [100., 100., 100., 100.], 'score': .8 }, { 'image_id': 'second', 'category_id': 1, 'bbox': [50., 50., 50., 50.], 'score': .7 }, ] def testCocoWrappers(self): groundtruth = coco_tools.COCOWrapper(self._groundtruth_dict) detections = groundtruth.LoadAnnotations(self._detections_list) evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections) summary_metrics, _ = evaluator.ComputeMetrics() self.assertAlmostEqual(1.0, summary_metrics['Precision/mAP']) def testExportGroundtruthToCOCO(self): image_ids = ['first', 'second'] groundtruth_boxes = [np.array([[100, 100, 200, 200]], np.float), np.array([[50, 50, 100, 100]], np.float)] groundtruth_classes = [np.array([1], np.int32), np.array([1], np.int32)] categories = [{'id': 0, 'name': 'person'}, {'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}] output_path = os.path.join(tf.test.get_temp_dir(), 'groundtruth.json') result = coco_tools.ExportGroundtruthToCOCO( image_ids, groundtruth_boxes, groundtruth_classes, categories, output_path=output_path) self.assertDictEqual(result, self._groundtruth_dict) with tf.gfile.GFile(output_path, 'r') as f: written_result = f.read() # The json output should have floats written to 4 digits of precision. 
matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE) self.assertTrue(matcher.findall(written_result)) written_result = json.loads(written_result) self.assertAlmostEqual(result, written_result) def testExportDetectionsToCOCO(self): image_ids = ['first', 'second'] detections_boxes = [np.array([[100, 100, 200, 200]], np.float), np.array([[50, 50, 100, 100]], np.float)] detections_scores = [np.array([.8], np.float), np.array([.7], np.float)] detections_classes = [np.array([1], np.int32), np.array([1], np.int32)] categories = [{'id': 0, 'name': 'person'}, {'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}] output_path = os.path.join(tf.test.get_temp_dir(), 'detections.json') result = coco_tools.ExportDetectionsToCOCO( image_ids, detections_boxes, detections_scores, detections_classes, categories, output_path=output_path) self.assertListEqual(result, self._detections_list) with tf.gfile.GFile(output_path, 'r') as f: written_result = f.read() # The json output should have floats written to 4 digits of precision. matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE) self.assertTrue(matcher.findall(written_result)) written_result = json.loads(written_result) self.assertAlmostEqual(result, written_result) def testExportSegmentsToCOCO(self): image_ids = ['first', 'second'] detection_masks = [np.array( [[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]], dtype=np.uint8), np.array( [[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]], dtype=np.uint8)] for i, detection_mask in enumerate(detection_masks): detection_masks[i] = detection_mask[:, :, :, None] detection_scores = [np.array([.8], np.float), np.array([.7], np.float)] detection_classes = [np.array([1], np.int32), np.array([1], np.int32)] categories = [{'id': 0, 'name': 'person'}, {'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}] output_path = os.path.join(tf.test.get_temp_dir(), 'segments.json') result = coco_tools.ExportSegmentsToCOCO( image_ids, detection_masks, detection_scores, detection_classes, categories, output_path=output_path) with tf.gfile.GFile(output_path, 'r') as f: written_result = f.read() written_result = json.loads(written_result) mask_load = mask.decode([written_result[0]['segmentation']]) self.assertTrue(np.allclose(mask_load, detection_masks[0])) self.assertAlmostEqual(result, written_result) def testExportKeypointsToCOCO(self): image_ids = ['first', 'second'] detection_keypoints = [ np.array( [[[100, 200], [300, 400], [500, 600]], [[50, 150], [250, 350], [450, 550]]], dtype=np.int32), np.array( [[[110, 210], [310, 410], [510, 610]], [[60, 160], [260, 360], [460, 560]]], dtype=np.int32)] detection_scores = [np.array([.8, 0.2], np.float), np.array([.7, 0.3], np.float)] detection_classes = [np.array([1, 1], np.int32), np.array([1, 1], np.int32)] categories = [{'id': 1, 'name': 'person', 'num_keypoints': 3}, {'id': 2, 'name': 'cat'}, {'id': 3, 'name': 'dog'}] output_path = os.path.join(tf.test.get_temp_dir(), 'keypoints.json') result = coco_tools.ExportKeypointsToCOCO( image_ids, detection_keypoints, detection_scores, detection_classes, categories, output_path=output_path) with tf.gfile.GFile(output_path, 'r') as f: written_result = f.read() written_result = json.loads(written_result) self.assertAlmostEqual(result, written_result) def testSingleImageDetectionBoxesExport(self): boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, 1, 1]], dtype=np.float32) classes = np.array([1, 2, 3], dtype=np.int32) scores = np.array([0.8, 0.2, 0.7], dtype=np.float32) coco_boxes = np.array([[0, 
0, 1, 1], [0, 0, .5, .5], [.5, .5, .5, .5]], dtype=np.float32) coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco( image_id='first_image', category_id_set=set([1, 2, 3]), detection_boxes=boxes, detection_classes=classes, detection_scores=scores) for i, annotation in enumerate(coco_annotations): self.assertEqual(annotation['image_id'], 'first_image') self.assertEqual(annotation['category_id'], classes[i]) self.assertAlmostEqual(annotation['score'], scores[i]) self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) def testSingleImageDetectionMaskExport(self): masks = np.array( [[[1, 1,], [1, 1]], [[0, 0], [0, 1]], [[0, 0], [0, 0]]], dtype=np.uint8) classes = np.array([1, 2, 3], dtype=np.int32) scores = np.array([0.8, 0.2, 0.7], dtype=np.float32) coco_annotations = coco_tools.ExportSingleImageDetectionMasksToCoco( image_id='first_image', category_id_set=set([1, 2, 3]), detection_classes=classes, detection_scores=scores, detection_masks=masks) expected_counts = ['04', '31', '4'] for i, mask_annotation in enumerate(coco_annotations): self.assertEqual(mask_annotation['segmentation']['counts'], expected_counts[i]) self.assertTrue(np.all(np.equal(mask.decode( mask_annotation['segmentation']), masks[i]))) self.assertEqual(mask_annotation['image_id'], 'first_image') self.assertEqual(mask_annotation['category_id'], classes[i]) self.assertAlmostEqual(mask_annotation['score'], scores[i]) def testSingleImageGroundtruthExport(self): masks = np.array( [[[1, 1,], [1, 1]], [[0, 0], [0, 1]], [[0, 0], [0, 0]]], dtype=np.uint8) boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, 1, 1]], dtype=np.float32) coco_boxes = np.array([[0, 0, 1, 1], [0, 0, .5, .5], [.5, .5, .5, .5]], dtype=np.float32) classes = np.array([1, 2, 3], dtype=np.int32) is_crowd = np.array([0, 1, 0], dtype=np.int32) next_annotation_id = 1 expected_counts = ['04', '31', '4'] # Tests exporting without passing in is_crowd (for backward compatibility). coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco( image_id='first_image', category_id_set=set([1, 2, 3]), next_annotation_id=next_annotation_id, groundtruth_boxes=boxes, groundtruth_classes=classes, groundtruth_masks=masks) for i, annotation in enumerate(coco_annotations): self.assertEqual(annotation['segmentation']['counts'], expected_counts[i]) self.assertTrue(np.all(np.equal(mask.decode( annotation['segmentation']), masks[i]))) self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) self.assertEqual(annotation['image_id'], 'first_image') self.assertEqual(annotation['category_id'], classes[i]) self.assertEqual(annotation['id'], i + next_annotation_id) # Tests exporting with is_crowd. coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco( image_id='first_image', category_id_set=set([1, 2, 3]), next_annotation_id=next_annotation_id, groundtruth_boxes=boxes, groundtruth_classes=classes, groundtruth_masks=masks, groundtruth_is_crowd=is_crowd) for i, annotation in enumerate(coco_annotations): self.assertEqual(annotation['segmentation']['counts'], expected_counts[i]) self.assertTrue(np.all(np.equal(mask.decode( annotation['segmentation']), masks[i]))) self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i]))) self.assertEqual(annotation['image_id'], 'first_image') self.assertEqual(annotation['category_id'], classes[i]) self.assertEqual(annotation['iscrowd'], is_crowd[i]) self.assertEqual(annotation['id'], i + next_annotation_id) if __name__ == '__main__': tf.test.main()
PyTorch/LanguageModeling/BART/bart/modeling
modeling
modeling_utils
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import logging import os import re from dataclasses import dataclass from typing import Callable, Dict, List, Optional, Set, Tuple, Union import torch from torch import Tensor, device, dtype, nn from torch.nn import CrossEntropyLoss from torch.nn import functional as F from utils.activations import get_activation from bart.configuration.configuration_utils import PretrainedConfig from utils.file_utils import ( DUMMY_INPUTS, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_NAME, ModelOutput, cached_path, hf_bucket_url, is_remote_url, is_torch_tpu_available, replace_return_docstrings, ) from utils.generation_utils import GenerationMixin logger = logging.getLogger(__name__) try: from torch.nn import Identity except ImportError: # Older PyTorch compatibility class Identity(nn.Module): r"""A placeholder identity operator that is argument-insensitive. """ def __init__(self, *args, **kwargs): super().__init__() def forward(self, input): return input def find_pruneable_heads_and_indices( heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int] ) -> Tuple[Set[int], torch.LongTensor]: """ Finds the heads and their indices taking :obj:`already_pruned_heads` into account. Args: heads (:obj:`List[int]`): List of the indices of heads to prune. n_heads (:obj:`int`): The number of heads in the model. head_size (:obj:`int`): The size of each head. already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads. Returns: :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices. """ mask = torch.ones(n_heads, head_size) heads = set(heads) - already_pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in already_pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index: torch.LongTensor = torch.arange(len(mask))[mask].long() return heads, index class ModuleUtilsMixin: """ A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin. """ def num_parameters(self, only_trainable: bool = False) -> int: """ Get the number of (optionally, trainable) parameters in the model. Args: only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return only the number of trainable parameters Returns: :obj:`int`: The number of parameters. 
""" params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters() return sum(p.numel() for p in params) @staticmethod def _hook_rss_memory_pre_forward(module, *args, **kwargs): try: import psutil except (ImportError): raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_pre_forward = mem.rss return None @staticmethod def _hook_rss_memory_post_forward(module, *args, **kwargs): try: import psutil except (ImportError): raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.") process = psutil.Process(os.getpid()) mem = process.memory_info() module.mem_rss_post_forward = mem.rss mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0) return None def add_memory_hooks(self): """ Add a memory hook before and after each sub-module forward pass to record increase in memory consumption. Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to zero with :obj:`model.reset_memory_hooks_state()`. """ for module in self.modules(): module.register_forward_pre_hook(self._hook_rss_memory_pre_forward) module.register_forward_hook(self._hook_rss_memory_post_forward) self.reset_memory_hooks_state() def reset_memory_hooks_state(self): """ Reset the :obj:`mem_rss_diff` attribute of each module (see :func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`). """ for module in self.modules(): module.mem_rss_diff = 0 module.mem_rss_post_forward = 0 module.mem_rss_pre_forward = 0 @property def device(self) -> device: """ :obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same device). """ try: return next(self.parameters()).device except StopIteration: # For nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = self._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].device @property def dtype(self) -> dtype: """ :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype). """ try: return next(self.parameters()).dtype except StopIteration: # For nn.DataParallel compatibility in PyTorch 1.5 def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]: tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)] return tuples gen = self._named_members(get_members_fn=find_tensor_attributes) first_tuple = next(gen) return first_tuple[1].dtype def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor: """ Invert an attention mask (e.g., switches 0. and 1.). Args: encoder_attention_mask (:obj:`torch.Tensor`): An attention mask. Returns: :obj:`torch.Tensor`: The inverted attention mask. """ if encoder_attention_mask.dim() == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. 
https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility if self.dtype == torch.float16: encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4 elif self.dtype == torch.float32: encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9 else: raise ValueError( "{} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`".format( self.dtype ) ) return encoder_extended_attention_mask def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (:obj:`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (:obj:`Tuple[int]`): The shape of the input to the model. device: (:obj:`torch.device`): The device of the input to the model. Returns: :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype) extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def get_head_mask( self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False ) -> Tensor: """ Prepare the head mask if needed. Args: head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`): The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard). num_hidden_layers (:obj:`int`): The number of hidden layers in the model. 
is_attention_chunked: (:obj:`bool`, `optional, defaults to :obj:`False`): Whether or not the attentions scores are computed by chunks or not. Returns: :obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with :obj:`[None]` for each layer. """ if head_mask is not None: head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers) if is_attention_chunked is True: head_mask = head_mask.unsqueeze(-1) else: head_mask = [None] * num_hidden_layers return head_mask def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers): """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]""" if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}" head_mask = head_mask.to(dtype=self.dtype) # switch to fload if need + fp16 compatibility return head_mask class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin): r""" Base class for all models. :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading, downloading and saving models as well as a few methods common to all models to: * resize the input embeddings, * prune heads in the self-attention heads. Class attributes (overridden by derived classes): - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture. - **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments: - **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the TensorFlow checkpoint. - **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated to the model. - **path** (:obj:`str`) -- A path to the TensorFlow checkpoint. - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model. - **authorized_missing_keys** (:obj:`Optional[List[str]]`) -- A list of re pattern of tensor names to ignore when loading the model (and avoid unnecessary warnings). """ config_class = None base_model_prefix = "" authorized_missing_keys = None @property def dummy_inputs(self) -> Dict[str, torch.Tensor]: """ :obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network. """ return {"input_ids": torch.tensor(DUMMY_INPUTS)} def __init__(self, config: PretrainedConfig, *inputs, **kwargs): super().__init__() if not isinstance(config, PretrainedConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. " "To create a model from a pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ ) ) # Save config in model self.config = config @property def base_model(self) -> nn.Module: """ :obj:`torch.nn.Module`: The main body of the model. """ return getattr(self, self.base_model_prefix, self) def get_input_embeddings(self) -> nn.Module: """ Returns the model's input embeddings. 
Returns: :obj:`nn.Module`: A torch module mapping vocabulary to hidden states. """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: return base_model.get_input_embeddings() else: raise NotImplementedError def set_input_embeddings(self, value: nn.Module): """ Set model's input embeddings Args: value (:obj:`nn.Module`): A module mapping vocabulary to hidden states. """ base_model = getattr(self, self.base_model_prefix, self) if base_model is not self: base_model.set_input_embeddings(value) else: raise NotImplementedError def get_output_embeddings(self) -> nn.Module: """ Returns the model's output embeddings. Returns: :obj:`nn.Module`: A torch module mapping hidden states to vocabulary. """ return None # Overwrite for models with output embeddings def tie_weights(self): """ Tie the weights between the input embeddings and the output embeddings. If the :obj:`torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the weights instead. """ output_embeddings = self.get_output_embeddings() if output_embeddings is not None: self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings()) if self.config.is_encoder_decoder and self.config.tie_encoder_decoder: self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix) @staticmethod def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str): uninitialized_encoder_weights: List[str] = [] assert decoder.__class__ == encoder.__class__, f"{decoder.__class__} and {encoder.__class__} have to be equal." def tie_encoder_to_decoder_recursively( decoder_pointer: nn.Module, encoder_pointer: nn.Module, module_name: str, uninitialized_encoder_weights: List[str], depth=0, ): assert isinstance(decoder_pointer, nn.Module) and isinstance( encoder_pointer, nn.Module ), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module" if hasattr(decoder_pointer, "weight"): assert hasattr(encoder_pointer, "weight") encoder_pointer.weight = decoder_pointer.weight if hasattr(decoder_pointer, "bias"): assert hasattr(encoder_pointer, "bias") encoder_pointer.bias = decoder_pointer.bias return encoder_modules = encoder_pointer._modules decoder_modules = decoder_pointer._modules if len(decoder_modules) > 0: assert ( len(encoder_modules) > 0 ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}" all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()]) encoder_layer_pos = 0 for name, module in decoder_modules.items(): if name.isdigit(): encoder_name = str(int(name) + encoder_layer_pos) decoder_name = name if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])): # this can happen if the name corresponds to the position in a list module list of layers # in this case the decoder has added a cross-attention that the encoder does not have # thus skip this step and substract one layer pos from encoder encoder_layer_pos -= 1 continue elif name not in encoder_modules: continue elif depth > 500: raise ValueError( "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model." 
) else: decoder_name = encoder_name = name tie_encoder_to_decoder_recursively( decoder_modules[decoder_name], encoder_modules[encoder_name], module_name + "/" + name, uninitialized_encoder_weights, depth=depth + 1, ) all_encoder_weights.remove(module_name + "/" + encoder_name) uninitialized_encoder_weights += list(all_encoder_weights) # tie weights recursively tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights) if len(uninitialized_encoder_weights) > 0: logger.warning( f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}" ) def _tie_or_clone_weights(self, output_embeddings, input_embeddings): """ Tie or clone module weights depending of whether we are using TorchScript or not """ if self.config.torchscript: output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone()) else: output_embeddings.weight = input_embeddings.weight if getattr(output_embeddings, "bias", None) is not None: output_embeddings.bias.data = torch.nn.functional.pad( output_embeddings.bias.data, (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],), "constant", 0, ) if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"): output_embeddings.out_features = input_embeddings.num_embeddings def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding: """ Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`. Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method. Arguments: new_num_tokens (:obj:`int`, `optional`): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model wihtout doing anything. Return: :obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. """ base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed model_embeds = base_model._resize_token_embeddings(new_num_tokens) if new_num_tokens is None: return model_embeds # Update base model and current model config self.config.vocab_size = new_num_tokens base_model.vocab_size = new_num_tokens # Tie weights again if needed self.tie_weights() return model_embeds def _resize_token_embeddings(self, new_num_tokens): old_embeddings = self.get_input_embeddings() new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) self.set_input_embeddings(new_embeddings) return self.get_input_embeddings() def _get_resized_embeddings( self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None ) -> torch.nn.Embedding: """ Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end Args: old_embeddings (:obj:`torch.nn.Embedding`): Old embeddings to be resized. new_num_tokens (:obj:`int`, `optional`): New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens :obj:`torch.nn.Embedding`` module of the model wihtout doing anything. 
Return: :obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if :obj:`new_num_tokens` is :obj:`None` """ if new_num_tokens is None: return old_embeddings old_num_tokens, old_embedding_dim = old_embeddings.weight.size() if old_num_tokens == new_num_tokens: return old_embeddings # Build new embeddings new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim) new_embeddings.to(old_embeddings.weight.device) # initialize all new embeddings (in particular added tokens) self._init_weights(new_embeddings) # Copy token embeddings from the previous weights num_tokens_to_copy = min(old_num_tokens, new_num_tokens) new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :] return new_embeddings def init_weights(self): """ Initializes and prunes weights if needed. """ # Initialize weights self.apply(self._init_weights) # Prune heads if needed if self.config.pruned_heads: self.prune_heads(self.config.pruned_heads) # Tie weights if needed self.tie_weights() def prune_heads(self, heads_to_prune: Dict[int, List[int]]): """ Prunes heads of the base model. Arguments: heads_to_prune (:obj:`Dict[int, List[int]]`): Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2. """ # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads for layer, heads in heads_to_prune.items(): union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads) self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON self.base_model._prune_heads(heads_to_prune) def save_pretrained(self, save_directory): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method. Arguments: save_directory (:obj:`str`): Directory to which to save. Will be created if it doesn't exist. """ if os.path.isfile(save_directory): logger.error("Provided path ({}) should be a directory, not a file".format(save_directory)) return os.makedirs(save_directory, exist_ok=True) # Only save the model itself if we are using distributed training model_to_save = self.module if hasattr(self, "module") else self # Attach architecture to the config model_to_save.config.architectures = [model_to_save.__class__.__name__] # If we save using the predefined names, we can load using `from_pretrained` output_model_file = os.path.join(save_directory, WEIGHTS_NAME) if getattr(self.config, "xla_device", False): import torch_xla.core.xla_model as xm if xm.is_master_ordinal(): # Save configuration file model_to_save.config.save_pretrained(save_directory) # xm.save takes care of saving only from master xm.save(model_to_save.state_dict(), output_model_file) else: model_to_save.config.save_pretrained(save_directory) torch.save(model_to_save.state_dict(), output_model_file) logger.info("Model weights saved in {}".format(output_model_file)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): r""" Instantiate a pretrained pytorch model from a pre-trained model configuration. The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To train the model, you should first set it back in training mode with ``model.train()``. 
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (:obj:`str`, `optional`): Can be either: - A string with the `shortcut name` of a pretrained model to load from cache or download, e.g., ``bert-base-uncased``. - A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g., ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards. - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``). model_args (sequence of positional arguments, `optional`): All remaning positional arguments will be passed to the underlying model's ``__init__`` method. config (:obj:`Union[PretrainedConfig, str]`, `optional`): Can be either: - an instance of a class derived from :class:`~transformers.PretrainedConfig`, - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`. Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when: - The model is a model provided by the library (loaded with the `shortcut name` string of a pretrained model). - The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory. - The model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory. state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`): A state dictionary to use instead of a state dictionary loaded from saved weights file. This option can be used if you want to create a model from a pretrained configuration but load your own weights. In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option. cache_dir (:obj:`str`, `optional`): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`): Load the model weights from a TensorFlow checkpoint save file (see docstring of ``pretrained_model_name_or_path`` argument). force_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. 
proxies (:obj:`Dict[str, str], `optional`): A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`): Whether ot not to also return a dictionnary containing missing keys, unexpected keys and error messages. local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to only look at local files (e.g., not try doanloading the model). use_cdn(:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to use Cloudfront (a Content Delivery Network, or CDN) when searching for the model on our S3 (faster). Should be set to :obj:`False` for checkpoints larger than 20GB. kwargs (remaining dictionary of keyword arguments, `optional`): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., :obj:`output_attention=True`). Behaves differently depending on whether a ``config`` is provided or automatically loaded: - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done) - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function. Examples:: from transformers import BertConfig, BertModel # Download model and configuration from S3 and cache. model = BertModel.from_pretrained('bert-base-uncased') # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable). model = BertModel.from_pretrained('./test/saved_model/') # Update configuration during loading. model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) assert model.config.output_attention == True # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable). 
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json') model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config) """ config = kwargs.pop("config", None) state_dict = kwargs.pop("state_dict", None) cache_dir = kwargs.pop("cache_dir", None) from_tf = kwargs.pop("from_tf", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) output_loading_info = kwargs.pop("output_loading_info", False) local_files_only = kwargs.pop("local_files_only", False) use_cdn = kwargs.pop("use_cdn", True) # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, *model_args, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, **kwargs, ) else: model_kwargs = kwargs # Load model if pretrained_model_name_or_path is not None: if os.path.isdir(pretrained_model_name_or_path): if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")): # Load from a TF 1.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index") elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)): # Load from a TF 2.0 checkpoint archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME) elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: raise EnvironmentError( "Error no file named {} found in directory {} or `from_tf` set to False".format( [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path, ) ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): assert ( from_tf ), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( pretrained_model_name_or_path + ".index" ) archive_file = pretrained_model_name_or_path + ".index" else: archive_file = hf_bucket_url( pretrained_model_name_or_path, filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME), use_cdn=use_cdn, ) try: # Load from URL or cache if already cached resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) if resolved_archive_file is None: raise EnvironmentError except EnvironmentError: msg = ( f"Can't load weights for '{pretrained_model_name_or_path}'. 
Make sure that:\n\n" f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n" f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n" ) raise EnvironmentError(msg) if resolved_archive_file == archive_file: logger.info("loading weights file {}".format(archive_file)) else: logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file)) else: resolved_archive_file = None # Instantiate model. model = cls(config, *model_args, **model_kwargs) if state_dict is None and not from_tf: try: state_dict = torch.load(resolved_archive_file, map_location="cpu") if "state_dict" in state_dict.keys(): #Loading models that store optimizer states, etc along with state_dict in ckpt state_dict = state_dict["state_dict"] except Exception: raise OSError( "Unable to load weights from pytorch checkpoint file. " "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. " ) missing_keys = [] unexpected_keys = [] error_msgs = [] if from_tf: if resolved_archive_file.endswith(".index"): # Load from a TensorFlow 1.X checkpoint - provided by original authors model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index' else: # Load from our TensorFlow 2.0 checkpoints try: from transformers import load_tf2_checkpoint_in_pytorch_model model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True) except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see " "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions." ) raise else: # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if "gamma" in key: new_key = key.replace("gamma", "weight") if "beta" in key: new_key = key.replace("beta", "bias") if "model." in key: new_key = key.replace("model.", "") if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. def load(module: nn.Module, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs, ) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") # Make sure we are able to load base models as well as derived models (with heads) start_prefix = "" model_to_load = model has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys()) if not hasattr(model, cls.base_model_prefix) and has_prefix_module: start_prefix = cls.base_model_prefix + "." 
if hasattr(model, cls.base_model_prefix) and not has_prefix_module: model_to_load = getattr(model, cls.base_model_prefix) load(model_to_load, prefix=start_prefix) if model.__class__.__name__ != model_to_load.__class__.__name__: base_model_state_dict = model_to_load.state_dict().keys() head_model_state_dict_without_base_prefix = [ key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys() ] missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict) # Some models may have keys that are not in the state by design, removing them before needlessly warning # the user. if cls.authorized_missing_keys is not None: for pat in cls.authorized_missing_keys: missing_keys = [k for k in missing_keys if re.search(pat, k) is None] if len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when " f"initializing {model.__class__.__name__}: {unexpected_keys}\n" f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task " f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).\n" f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect " f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: logger.warning( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} " f"and are newly initialized: {missing_keys}\n" f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) else: logger.info( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n" f"If your task is similar to the task the model of the checkpoint was trained on, " f"you can already use {model.__class__.__name__} for predictions without further training." ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format( model.__class__.__name__, "\n\t".join(error_msgs) ) ) # make sure token embedding weights are still tied if needed model.tie_weights() # Set model in evaluation mode to deactivate DropOut modules by default model.eval() if output_loading_info: loading_info = { "missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs, } return model, loading_info if hasattr(config, "xla_device") and config.xla_device and is_torch_tpu_available(): import torch_xla.core.xla_model as xm model = xm.send_cpu_data_to_device(model, xm.xla_device()) model.to(xm.xla_device()) return model class Conv1D(nn.Module): """ 1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed. Args: nf (:obj:`int`): The number of output features. nx (:obj:`int`): The number of input features. 
""" def __init__(self, nf, nx): super().__init__() self.nf = nf w = torch.empty(nx, nf) nn.init.normal_(w, std=0.02) self.weight = nn.Parameter(w) self.bias = nn.Parameter(torch.zeros(nf)) def forward(self, x): size_out = x.size()[:-1] + (self.nf,) x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight) x = x.view(*size_out) return x class PoolerStartLogits(nn.Module): """ Compute SQuAD start logits from sequence hidden states. Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model, will be used to grab the :obj:`hidden_size` of the model. """ def __init__(self, config: PretrainedConfig): super().__init__() self.dense = nn.Linear(config.hidden_size, 1) def forward( self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None ) -> torch.FloatTensor: """ Args: hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): The final hidden states of the model. p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. Returns: :obj:`torch.FloatTensor`: The start logits for SQuAD. """ x = self.dense(hidden_states).squeeze(-1) if p_mask is not None: if next(self.parameters()).dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x class PoolerEndLogits(nn.Module): """ Compute SQuAD end logits from sequence hidden states. Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the :obj:`layer_norm_eps` to use. """ def __init__(self, config: PretrainedConfig): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense_1 = nn.Linear(config.hidden_size, 1) def forward( self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, p_mask: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: """ Args: hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): The final hidden states of the model. start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`): The hidden states of the first tokens for the labeled span. start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): The position of the first token for the labeled span. p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. .. note:: One of ``start_states`` or ``start_positions`` should be not obj:`None`. If both are set, ``start_positions`` overrides ``start_states``. Returns: :obj:`torch.FloatTensor`: The end logits for SQuAD. 
""" assert ( start_states is not None or start_positions is not None ), "One of start_states, start_positions should be not None" if start_positions is not None: slen, hsz = hidden_states.shape[-2:] start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions) # shape (bsz, 1, hsz) start_states = start_states.expand(-1, slen, -1) # shape (bsz, slen, hsz) x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1)) x = self.activation(x) x = self.LayerNorm(x) x = self.dense_1(x).squeeze(-1) if p_mask is not None: if next(self.parameters()).dtype == torch.float16: x = x * (1 - p_mask) - 65500 * p_mask else: x = x * (1 - p_mask) - 1e30 * p_mask return x class PoolerAnswerClass(nn.Module): """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model, will be used to grab the :obj:`hidden_size` of the model. """ def __init__(self, config): super().__init__() self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size) self.activation = nn.Tanh() self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False) def forward( self, hidden_states: torch.FloatTensor, start_states: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, cls_index: Optional[torch.LongTensor] = None, ) -> torch.FloatTensor: """ Args: hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): The final hidden states of the model. start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`): The hidden states of the first tokens for the labeled span. start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): The position of the first token for the labeled span. cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token. .. note:: One of ``start_states`` or ``start_positions`` should be not obj:`None`. If both are set, ``start_positions`` overrides ``start_states``. Returns: :obj:`torch.FloatTensor`: The SQuAD 2.0 answer class. """ # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample. hsz = hidden_states.shape[-1] assert ( start_states is not None or start_positions is not None ), "One of start_states, start_positions should be not None" if start_positions is not None: start_positions = start_positions[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) start_states = hidden_states.gather(-2, start_positions).squeeze(-2) # shape (bsz, hsz) if cls_index is not None: cls_index = cls_index[:, None, None].expand(-1, -1, hsz) # shape (bsz, 1, hsz) cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, hsz) else: cls_token_state = hidden_states[:, -1, :] # shape (bsz, hsz) x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1)) x = self.activation(x) x = self.dense_1(x).squeeze(-1) return x @dataclass class SquadHeadOutput(ModelOutput): """ Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`. 
Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided): Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Log probabilities for the top config.start_n_top start token possibilities (beam-search). start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Indices for the top config.start_n_top start token possibilities (beam-search). end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Log probabilities for the ``is_impossible`` label of the answers. """ loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None class SQuADHead(nn.Module): r""" A SQuAD head inspired by XLNet. Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the :obj:`layer_norm_eps` to use. """ def __init__(self, config): super().__init__() self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.start_logits = PoolerStartLogits(config) self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig) def forward( self, hidden_states: torch.FloatTensor, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, cls_index: Optional[torch.LongTensor] = None, is_impossible: Optional[torch.LongTensor] = None, p_mask: Optional[torch.FloatTensor] = None, return_dict: bool = False, ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]: """ Args: hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`): Final hidden states of the model on the sequence tokens. start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Positions of the first token for the labeled span. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Positions of the last token for the labeled span. cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token. 
is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Whether the question has a possible answer in the paragraph or not. p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`): Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token should be masked. return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to return a :class:`~transformers.file_utils.ModelOuput` instead of a plain tuple. Returns: """ start_logits = self.start_logits(hidden_states, p_mask=p_mask) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,) else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk( start_log_probs, self.start_n_top, dim=-1 ) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( start_states ) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = torch.topk( end_log_probs, self.end_n_top, dim=1 ) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs) cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index) if not return_dict: return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) else: return SquadHeadOutput( start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits, ) class SequenceSummary(nn.Module): r""" Compute a single vector summary of a sequence hidden states. 
Args: config (:class:`~transformers.PretrainedConfig`): The config used by the model. Relevant arguments in the config class of the model are (refer to the actual config class of your model for the default values it uses): - **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are: - :obj:`"last"` -- Take the last token hidden state (like XLNet) - :obj:`"first"` -- Take the first token hidden state (like Bert) - :obj:`"mean"` -- Take the mean of all tokens hidden states - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2) - :obj:`"attn"` -- Not implemented now, use multi-head attention - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction. - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`). - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the output, another string or :obj:`None` will add no activation. - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and activation. - **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and activation. """ def __init__(self, config: PretrainedConfig): super().__init__() self.summary_type = getattr(config, "summary_type", "last") if self.summary_type == "attn": # We should use a standard multi-head attention module with absolute positional embedding for that. # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276 # We can probably just use the multi-head attention module of PyTorch >=1.1.0 raise NotImplementedError self.summary = Identity() if hasattr(config, "summary_use_proj") and config.summary_use_proj: if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0: num_classes = config.num_labels else: num_classes = config.hidden_size self.summary = nn.Linear(config.hidden_size, num_classes) activation_string = getattr(config, "summary_activation", None) self.activation: Callable = (get_activation(activation_string) if activation_string else Identity()) self.first_dropout = Identity() if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0: self.first_dropout = nn.Dropout(config.summary_first_dropout) self.last_dropout = Identity() if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0: self.last_dropout = nn.Dropout(config.summary_last_dropout) def forward( self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None ) -> torch.FloatTensor: """ Compute a single vector summary of a sequence hidden states. Args: hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`): The hidden states of the last layer. cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`): Used if :obj:`summary_type == "cls_index"` and takes the last token of the sequence as classification token. Returns: :obj:`torch.FloatTensor`: The summary of the sequence hidden states. 
""" if self.summary_type == "last": output = hidden_states[:, -1] elif self.summary_type == "first": output = hidden_states[:, 0] elif self.summary_type == "mean": output = hidden_states.mean(dim=1) elif self.summary_type == "cls_index": if cls_index is None: cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long,) else: cls_index = cls_index.unsqueeze(-1).unsqueeze(-1) cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),)) # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states output = hidden_states.gather(-2, cls_index).squeeze(-2) # shape (bsz, XX, hidden_size) elif self.summary_type == "attn": raise NotImplementedError output = self.first_dropout(output) output = self.summary(output) output = self.activation(output) output = self.last_dropout(output) return output def prune_linear_layer(layer: torch.nn.Linear, index: torch.LongTensor, dim: int = 0) -> torch.nn.Linear: """ Prune a linear layer to keep only entries in index. Used to remove heads. Args: layer (:obj:`torch.nn.Linear`): The layer to prune. index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices. Returns: :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`. """ index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if layer.bias is not None: if dim == 1: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True if layer.bias is not None: new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D: """ Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed. Used to remove heads. Args: layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune. index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices. Returns: :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`. """ index = index.to(layer.weight.device) W = layer.weight.index_select(dim, index).clone().detach() if dim == 0: b = layer.bias.clone().detach() else: b = layer.bias[index].clone().detach() new_size = list(layer.weight.size()) new_size[dim] = len(index) new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device) new_layer.weight.requires_grad = False new_layer.weight.copy_(W.contiguous()) new_layer.weight.requires_grad = True new_layer.bias.requires_grad = False new_layer.bias.copy_(b.contiguous()) new_layer.bias.requires_grad = True return new_layer def prune_layer( layer: Union[torch.nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None ) -> Union[torch.nn.Linear, Conv1D]: """ Prune a Conv1D or linear layer to keep only entries in index. Used to remove heads. Args: layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune. 
index (:obj:`torch.LongTensor`): The indices to keep in the layer. dim (:obj:`int`, `optional`): The dimension on which to keep the indices. Returns: :obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`. """ if isinstance(layer, nn.Linear): return prune_linear_layer(layer, index, dim=0 if dim is None else dim) elif isinstance(layer, Conv1D): return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim) else: raise ValueError("Can't prune layer of class {}".format(layer.__class__)) def apply_chunking_to_forward( forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors ) -> torch.Tensor: """ This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory. If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as directly applying :obj:`forward_fn` to :obj:`input_tensors`. Args: forward_fn (:obj:`Callable[..., torch.Tensor]`): The forward function of the model. chunk_size (:obj:`int`): The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`. chunk_dim (:obj:`int`): The dimension over which the :obj:`input_tensors` should be chunked. input_tensors (:obj:`Tuple[torch.Tensor]`): The input tensors of ``forward_fn`` which will be chunked. Returns: :obj:`torch.Tensor`: A tensor with the same shape as the :obj:`foward_fn` would have given if applied`. Examples:: # rename the usual forward() fn to forward_chunk() def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states # implement a chunked forward function def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) """ assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors) tensor_shape = input_tensors[0].shape assert all( input_tensor.shape == tensor_shape for input_tensor in input_tensors ), "All input tenors have to be of the same shape" # inspect.signature exist since python 3.5 and is a python method -> no problem with backward compability num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters) assert num_args_in_forward_chunk_fn == len( input_tensors ), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format( num_args_in_forward_chunk_fn, len(input_tensors) ) if chunk_size > 0: assert ( input_tensors[0].shape[chunk_dim] % chunk_size == 0 ), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format( input_tensors[0].shape[chunk_dim], chunk_size ) num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size # chunk input tensor into tuples input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors) # apply forward fn to every tuple output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks)) # concatenate output at same dimension return torch.cat(output_chunks, dim=chunk_dim) return forward_fn(*input_tensors)
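The additive attention-mask trick implemented by `get_extended_attention_mask` earlier in this file is easiest to see in a tiny standalone sketch. The snippet below is illustrative only (shapes and values are made up, and it is not part of the library): a `[batch, seq_len]` padding mask of ones and zeros is broadcast to `[batch, 1, 1, seq_len]` and converted to 0.0 / -10000.0 values that are added to the raw attention scores before the softmax, which drives the probability mass on masked key positions to roughly zero.

```
import torch

# [batch, seq_len] padding mask: 1 = attend, 0 = padding
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])

# Broadcast to [batch, 1, 1, seq_len] and turn it into an additive mask.
extended = attention_mask[:, None, None, :].to(torch.float32)
extended = (1.0 - extended) * -10000.0   # 0.0 where attended, -1e4 where masked

# Adding the mask to the raw scores before the softmax suppresses padded keys.
scores = torch.randn(2, 1, 5, 5)         # [batch, heads, query_len, key_len]
probs = torch.softmax(scores + extended, dim=-1)
print(probs[0, 0, 0])                    # the two padded positions receive ~0 probability
```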
PyTorch/Classification/ConvNets/efficientnet/training/AMP
AMP
DGXA100_efficientnet-widese-b4_AMP
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-widese-b4 --precision AMP --mode convergence --platform DGXA100 /imagenet --workspace ${1:-./} --raport-file raport.json
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
faster_rcnn_resnet101_voc07
# Faster R-CNN with Resnet-101 (v1), configured for Pascal VOC Dataset. # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. model { faster_rcnn { num_classes: 20 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_resnet101' first_stage_features_stride: 16 } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { op: CONV regularizer { l2_regularizer { weight: 0.0 } } initializer { truncated_normal_initializer { stddev: 0.01 } } } first_stage_nms_score_threshold: 0.0 first_stage_nms_iou_threshold: 0.7 first_stage_max_proposals: 300 first_stage_localization_loss_weight: 2.0 first_stage_objectness_loss_weight: 1.0 initial_crop_size: 14 maxpool_kernel_size: 2 maxpool_stride: 2 second_stage_box_predictor { mask_rcnn_box_predictor { use_dropout: false dropout_keep_probability: 1.0 fc_hyperparams { op: FC regularizer { l2_regularizer { weight: 0.0 } } initializer { variance_scaling_initializer { factor: 1.0 uniform: true mode: FAN_AVG } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.0 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } second_stage_localization_loss_weight: 2.0 second_stage_classification_loss_weight: 1.0 } } train_config: { batch_size: 1 optimizer { momentum_optimizer: { learning_rate: { manual_step_learning_rate { initial_learning_rate: 0.0001 schedule { step: 500000 learning_rate: .00001 } schedule { step: 700000 learning_rate: .000001 } } } momentum_optimizer_value: 0.9 } use_moving_average: false } gradient_clipping_by_norm: 10.0 fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" from_detection_checkpoint: true num_steps: 800000 data_augmentation_options { random_horizontal_flip { } } } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/pascal_train.record" } label_map_path: "PATH_TO_BE_CONFIGURED/pascal_label_map.pbtxt" } eval_config: { num_examples: 4952 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/pascal_val.record" } label_map_path: "PATH_TO_BE_CONFIGURED/pascal_label_map.pbtxt" shuffle: false num_readers: 1 }
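The pipeline config above leaves the `fine_tune_checkpoint`, the train/eval `input_path` fields, and the `label_map_path` set to the `PATH_TO_BE_CONFIGURED` placeholder. A minimal sketch of filling those in before training is shown below; the config filename and dataset root are hypothetical and should be replaced with your own paths.

```
from pathlib import Path

# Hypothetical local copy of the pipeline config and a hypothetical data root.
config_path = Path("faster_rcnn_resnet101_voc07.config")
dataset_root = "/data/pascal_voc"

# Every placeholder in the config uses the same marker string, so a plain
# text substitution is enough for a quick start.
text = config_path.read_text()
config_path.write_text(text.replace("PATH_TO_BE_CONFIGURED", dataset_root))
```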
PyTorch/LanguageModeling/BERT
BERT
README
# BERT For PyTorch This repository provides a script and recipe to train the BERT model for PyTorch to achieve state-of-the-art accuracy and is tested and maintained by NVIDIA. ## Table Of Contents - [Model overview](#model-overview) * [Model architecture](#model-architecture) * [Default configuration](#default-configuration) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [Enabling TF32](#enabling-tf32) * [Glossary](#glossary) - [Setup](#setup) * [Requirements](#requirements) - [Quick Start Guide](#quick-start-guide) - [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Parameters](#parameters) * [Pre-training parameters](#pre-training-parameters) * [Fine tuning parameters](#fine-tuning-parameters) * [Multi-node](#multi-node) * [Command-line options](#command-line-options) * [Getting the data](#getting-the-data) * [Dataset guidelines](#dataset-guidelines) * [Training process](#training-process) * [Pre-training](#pre-training) * [Fine-tuning](#fine-tuning) * [Inference process](#inference-process) * [Fine-tuning inference](#fine-tuning-inference) * [Deploying BERT using NVIDIA Triton Inference Server](#deploying-bert-using-nvidia-triton-inference-server) - [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Pre-training loss results: NVIDIA DGX A100 (8x A100 80GB)](#pre-training-loss-results-nvidia-dgx-a100-8x-a100-80gb) * [Pre-training loss curves](#pre-training-loss-curves) * [Fine-tuning accuracy results: NVIDIA DGX A100 (8x A100 80GB)](#fine-tuning-accuracy-results-nvidia-dgx-a100-8x-a100-80gb) * [Training stability test](#training-stability-test) * [Pre-training stability test](#pre-training-stability-test) * [Fine-tuning stability test](#fine-tuning-stability-test) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb) * [Pre-training NVIDIA DGX A100 (8x A100 80GB)](#pre-training-nvidia-dgx-a100-8x-a100-80gb) * [Pre-training NVIDIA DGX A100 (8x A100 80GB) Multi-node Scaling](#pre-training-nvidia-dgx-a100-8x-a100-80gb-multi-node-scaling) * [Fine-tuning NVIDIA DGX A100 (8x A100 80GB)](#fine-tuning-nvidia-dgx-a100-8x-a100-80gb) * [Training performance: NVIDIA DGX-1 (8x V100 32G)](#training-performance-nvidia-dgx-1-8x-v100-32g) * [Pre-training NVIDIA DGX-1 With 32G](#pre-training-nvidia-dgx-1-with-32g) * [Fine-tuning NVIDIA DGX-1 With 32G](#fine-tuning-nvidia-dgx-1-with-32g) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-1x-a100-80gb) * [Fine-tuning inference on NVIDIA DGX A100 (1x A100 80GB)](#fine-tuning-inference-on-nvidia-dgx-a100-1x-a100-80gb) - [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview BERT, or Bidirectional Encoder Representations from Transformers, is a new method of pre-training language representations that obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. 
This model is based on the [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) paper. NVIDIA's implementation of BERT is an optimized version of the [Hugging Face implementation](https://github.com/huggingface/pytorch-pretrained-BERT), leveraging mixed precision arithmetic and Tensor Cores on NVIDIA Volta V100 and NVIDIA Ampere A100 GPUs for faster training times while maintaining target accuracy. This repository contains scripts to interactively launch data download, training, benchmarking, and inference routines in a Docker container for both pre-training and fine-tuning tasks such as question answering. The major differences between the original implementation of the paper and this version of BERT are as follows: - Scripts to download the Wikipedia dataset - Scripts to preprocess downloaded data into inputs and targets for pre-training in a modular fashion - Fused [LAMB](https://arxiv.org/pdf/1904.00962.pdf) optimizer to support training with larger batches - Fused Adam optimizer for fine-tuning tasks - Fused CUDA kernels for better LayerNorm performance - Automatic mixed precision (AMP) training support - Scripts to launch on multiple nodes Other publicly available implementations of BERT include: 1. [NVIDIA TensorFlow](https://github.com/NVIDIA/DeepLearningExamples/tree/master/TensorFlow/LanguageModeling/BERT) 2. [Hugging Face](https://github.com/huggingface/pytorch-pretrained-BERT) 3. [codertimo](https://github.com/codertimo/BERT-pytorch) 4. [gluon-nlp](https://github.com/dmlc/gluon-nlp/tree/v0.10.x/scripts/bert) 5. [Google's implementation](https://github.com/google-research/bert) This model trains with mixed precision Tensor Cores on NVIDIA Volta and provides a push-button solution to pre-training on a corpus of choice. As a result, researchers can get results 4x faster than training without Tensor Cores. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. ### Model architecture The BERT model uses the same architecture as the encoder of the Transformer. Input sequences are projected into an embedding space before being fed into the encoder structure. Additionally, positional and segment encodings are added to the embeddings to preserve positional information. The encoder structure is simply a stack of Transformer blocks, which consist of a multi-head attention layer followed by successive stages of feed-forward networks and layer normalization. The multi-head attention layer accomplishes self-attention on multiple input representations. An illustration of the architecture taken from the [Transformer paper](https://arxiv.org/pdf/1706.03762.pdf) is shown below. ![BERT](images/model.png) ### Default configuration The architecture of the BERT model is almost identical to the Transformer model that was first introduced in the [Attention Is All You Need paper](https://arxiv.org/pdf/1706.03762.pdf). The main innovation of BERT lies in the pre-training step, where the model is trained on two unsupervised prediction tasks using a large text corpus. Training on these unsupervised tasks produces a generic language model, which can then be quickly fine-tuned to achieve state-of-the-art performance on language processing tasks such as question answering. The BERT paper reports the results for two configurations of BERT, each corresponding to a unique model size. 
This implementation provides the same configurations by default, which are described in the table below. | **Model** | **Hidden layers** | **Hidden unit size** | **Attention heads** | **Feedforward filter size** | **Max sequence length** | **Parameters** | |:---------:|:-----------------:|:--------------------:|:-------------------:|:---------------------------:|:-----------------------:|:--------------:| | BERTBASE | 12 encoder | 768 | 12 | 4 x 768 | 512 | 110M | | BERTLARGE | 24 encoder | 1024 | 16 | 4 x 1024 | 512 | 330M | ### Feature support matrix The following features are supported by this model. | **Feature** | **BERT** | |:-----------:|:--------:| | PyTorch AMP | Yes | | PyTorch DDP | Yes | | LAMB | Yes | | Multi-node | Yes | | LDDL | Yes | | NVFuser | Yes | #### Features [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension with NVIDIA-maintained utilities to streamline mixed precision and distributed training, whereas [AMP](https://nvidia.github.io/apex/amp.html) is an abbreviation used for automatic mixed precision training. [DDP](https://nvidia.github.io/apex/parallel.html) stands for DistributedDataParallel and is used for multi-GPU training. [LAMB](https://arxiv.org/pdf/1904.00962.pdf) stands for Layerwise Adaptive Moments based optimizer, is a large batch optimization technique that helps accelerate training of deep neural networks using large minibatches. It allows using a global batch size of 65536 and 32768 on sequence lengths 128 and 512 respectively, compared to a batch size of 256 for [Adam](https://arxiv.org/pdf/1412.6980.pdf). The optimized implementation accumulates 1024 gradient batches in phase 1 and 4096 steps in phase 2 before updating weights once. This results in a 15% training speedup. On multi-node systems, LAMB allows scaling up to 1024 GPUs resulting in training speedups of up to 72x in comparison to Adam. Adam has limitations on the learning rate that can be used since it is applied globally on all parameters whereas LAMB follows a layerwise learning rate strategy. NVLAMB adds the necessary tweaks to [LAMB version 1](https://arxiv.org/abs/1904.00962v1), to ensure correct convergence. The algorithm is as follows: ![NVLAMB](images/nvlamb.png) [LDDL](../../../Tools/lddl) is a library that enables scalable data preprocessing and loading. LDDL is used by this PyTorch BERT example. NVFuser is NVIDIA's fusion backend for PyTorch. ### Mixed precision training Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [tensor cores](https://developer.nvidia.com/tensor-cores) in the NVIDIA Volta, and following with both the NVIDIA Turing and NVIDIA Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. 
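As a point of reference only, the two steps above can be sketched with PyTorch's native `torch.cuda.amp` API; this repository itself enables mixed precision through APEX, as described below, and the toy model, optimizer, and data in this sketch are placeholders rather than the BERT training loop:

```
# Illustrative sketch only, not this repository's training code.
import torch
from torch import nn

device = "cuda" if torch.cuda.is_available() else "cpu"
model = nn.Linear(1024, 2).to(device)        # hypothetical stand-in for BERT
optimizer = torch.optim.AdamW(model.parameters(), lr=3e-5)
scaler = torch.cuda.amp.GradScaler(enabled=(device == "cuda"))

inputs = torch.randn(8, 1024, device=device)
labels = torch.randint(0, 2, (8,), device=device)

optimizer.zero_grad()
# Step 1: run the forward pass in FP16 where it is safe to do so.
with torch.cuda.amp.autocast(enabled=(device == "cuda")):
    loss = nn.functional.cross_entropy(model(inputs), labels)
# Step 2: scale the loss so small gradients are not flushed to zero in FP16.
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
```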
For information about: - How to train using mixed precision, refer to the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, refer to the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. - APEX tools for mixed precision training, refer to the [NVIDIA APEX: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/). #### Enabling mixed precision In this repository, mixed precision training is enabled by NVIDIA’s APEX library. The APEX library has an automatic mixed precision module that allows mixed precision to be enabled with minimal code changes. Automatic mixed precision can be enabled with the following code changes: ``` from apex import amp if fp16: # Wrap optimizer and model model, optimizer = amp.initialize(model, optimizer, opt_level=<opt_level>, loss_scale="dynamic") if fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() ``` Where `<opt_level>` is the optimization level. In the pre-training, `O2` is set as the optimization level. Mixed precision training can be turned on by passing the `fp16` argument to the `run_pretraining.py` and `run_squad.py`. All shell scripts have a positional argument available to enable mixed precision training. #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on NVIDIA Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require a high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ### Glossary **Fine-tuning** Training an already pre-trained model further using a task-specific dataset for subject-specific refinements by adding task-specific layers on top if required. **Language Model** Assigns a probability distribution over a sequence of words. Given a sequence of words, it assigns a probability to the whole sequence. **Pre-training** Training a model on vast amounts of data on the same (or different) task to build general understandings. **Transformer** The paper [Attention Is All You Need](https://arxiv.org/abs/1706.03762) introduces a novel architecture called Transformer that uses an attention mechanism and transforms one sequence into another. **Phase 1** Pre-training on samples of sequence length 128 and 20 masked predictions per sequence. **Phase 2** Pre-training on samples of sequence length 512 and 80 masked predictions per sequence. ## Setup The following section lists the requirements that you need to meet in order to start training the BERT model. ### Requirements This repository contains a Dockerfile that extends the PyTorch NGC container and encapsulates some dependencies. 
Aside from these dependencies, ensure you have the following components: - [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) - [PyTorch 21.11-py3 NGC container or later](https://ngc.nvidia.com/registry/nvidia-pytorch) - Supported GPUs: - [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) - [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/) - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, refer to the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: - [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) - [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry) - [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running) For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, refer to the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/dgx/support-matrix/index.html). For multi-node, the sample provided in this repository requires [Enroot](https://github.com/NVIDIA/enroot) and [Pyxis](https://github.com/NVIDIA/pyxis) set up on a [SLURM](https://slurm.schedmd.com) cluster. More information on how to set up and launch can be found in the [Multi-node Documentation](https://docs.nvidia.com/ngc/multi-node-bert-user-guide). ## Quick Start Guide To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the BERT model. Training configurations to run on 8 x A100 80G, 8 x V100 16G, 16 x V100 32G cards and examples of usage are provided at the end of this section. For the specifics concerning training and inference, refer to the [Advanced](#advanced) section. 1. Clone the repository. ``` git clone https://github.com/NVIDIA/DeepLearningExamples.git cd DeepLearningExamples/PyTorch/LanguageModeling/BERT ``` 2. Download the NVIDIA pre-trained checkpoint. If you want to use a pre-trained checkpoint, visit [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/bert_pyt_ckpt_large_pretraining_amp_lamb/files?version=20.03.0). This pre-trained checkpoint is used to fine-tune on SQuAD. Ensure you unzip the downloaded file and place the checkpoint in the `checkpoints/` folder. For a checkpoint already fine-tuned for QA on SQuAD v1.1 visit [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/models/bert_pyt_ckpt_large_qa_squad11_amp/files). 
Find all trained and available checkpoints in the table below: | Model | Description | |------------------------|---------------------------------------------------------------------------| | [bert-large-uncased-qa](https://catalog.ngc.nvidia.com/orgs/nvidia/models/bert_pyt_ckpt_large_qa_squad11_amp/files) | Large model fine-tuned on SQuAD v1.1 | | [bert-large-uncased-sst2](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/bert_pyt_ckpt_large_ft_sst2_amp) |Large model fine-tuned on GLUE SST-2 | | [bert-large-uncased-pretrained](https://catalog.ngc.nvidia.com/orgs/nvidia/models/bert_pyt_ckpt_large_pretraining_amp_lamb/files?version=20.03.0) | Large model pretrained checkpoint on Generic corpora like Wikipedia| | [bert-base-uncased-qa](https://catalog.ngc.nvidia.com/orgs/nvidia/models/bert_pyt_ckpt_base_qa_squad11_amp/files) | Base model fine-tuned on SQuAD v1.1 | | [bert-base-uncased-sst2](https://catalog.ngc.nvidia.com/orgs/nvidia/models/bert_pyt_ckpt_base_ft_sst2_amp_128/files) | Base model fine-tuned on GLUE SST-2 | | [bert-base-uncased-pretrained](https://catalog.ngc.nvidia.com/orgs/nvidia/models/bert_pyt_ckpt_base_pretraining_amp_lamb/files) | Base model pretrained checkpoint on Generic corpora like Wikipedia. | | [bert-dist-4L-288D-uncased-qa](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/bert_pyt_ckpt_distilled_4l_288d_qa_squad11_amp/files) | 4 layer distilled model fine-tuned on SQuAD v1.1 | | [bert-dist-4L-288D-uncased-sst2](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/bert_pyt_ckpt_distilled_4l_288d_ft_sst2_amp/files) | 4 layer distilled model fine-tuned on GLUE SST-2 | | [bert-dist-4L-288D-uncased-pretrained](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/bert_pyt_ckpt_distilled_4l_288d_pretraining_amp/files) | 4 layer distilled model pretrained checkpoint on Generic corpora like Wikipedia. | | [bert-dist-6L-768D-uncased-qa](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/bert_pyt_ckpt_distilled_6l_768d_qa_squad11_amp/files) | 6 layer distilled model fine-tuned on SQuAD v1.1 | | [bert-dist-6L-768D-uncased-sst2](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/bert_pyt_ckpt_distilled_6l_768d_ft_sst2_amp/files) | 6 layer distilled model fine-tuned on GLUE SST-2 | | [bert-dist-6L-768D-uncased-pretrained](https://catalog.ngc.nvidia.com/orgs/nvidia/teams/dle/models/bert_pyt_ckpt_distilled_6l_768d_pretraining_amp/files) | 6 layer distilled model pretrained checkpoint on Generic corpora like Wikipedia. | 3. Build BERT on top of the NGC container. ``` bash scripts/docker/build.sh ``` 4. Start an interactive session in the NGC container to run training/inference. ``` bash scripts/docker/launch.sh ``` Resultant logs and checkpoints of pre-training and fine-tuning routines are stored in the `results/` folder. `data` and `vocab.txt` are downloaded in the `data/` directory by default. Refer to the [Getting the data](#getting-the-data) section for more details on how to process a custom corpus as required for BERT pre-training. 5. Download the dataset. 
This repository provides scripts to download, verify, and extract the following datasets:

- [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) (fine-tuning for question answering)
- [MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398) (fine-tuning for paraphrase detection)
- [SST-2](https://nlp.stanford.edu/sentiment/index.html) (fine-tuning for sentiment analysis)
- Wikipedia (pre-training)

To download, verify, and extract the datasets, run:
```
/workspace/bert/data/create_datasets_from_start.sh
```

Note: For fine-tuning only, downloading the Wikipedia dataset can be skipped by commenting it out.

Note: Ensure the Wikipedia download completes. If the LDDL download fails, remove the output directory `data/wikipedia/` and start over.

6. Start pre-training.

To run pre-training on a single node with 8 x V100 32G cards, from within the container, use the following script:
```
bash scripts/run_pretraining.sh
```
The default hyperparameters are set to run on 8x V100 32G cards. To run on multiple nodes, refer to the [Multi-node](#multi-node) section.

7. Start fine-tuning with the SQuAD dataset.

The above pre-trained BERT representations can be fine-tuned with just one additional output layer for a state-of-the-art question answering system. Running the following script launches fine-tuning for question answering with the SQuAD dataset.
```
bash scripts/run_squad.sh /workspace/bert/checkpoints/<pre-trained_checkpoint>
```

8. Start fine-tuning with the GLUE tasks.

The above pre-trained BERT representations can be fine-tuned with just one additional output layer for GLUE tasks. Running the following script launches fine-tuning for paraphrase detection with the MRPC dataset:
```
bash scripts/run_glue.sh /workspace/bert/checkpoints/<pre-trained_checkpoint>
```

9. Run Knowledge Distillation (Optional).

To set up and run distillation on BERT, follow the steps provided [here](./distillation/README.md).

10. Start validation/evaluation.

For both SQuAD and GLUE, validation can be performed with `bash scripts/run_squad.sh /workspace/bert/checkpoints/<pre-trained_checkpoint>` or `bash scripts/run_glue.sh /workspace/bert/checkpoints/<pre-trained_checkpoint>`, setting `mode` to `eval` in `scripts/run_squad.sh` or `scripts/run_glue.sh` as follows:
```
mode=${11:-"eval"}
```

11. Start inference/predictions.

Inference can be performed with `bash scripts/run_squad.sh /workspace/bert/checkpoints/<pre-trained_checkpoint>`, setting `mode` to `prediction` in `scripts/run_squad.sh` or `scripts/run_glue.sh` as follows:
```
mode=${11:-"prediction"}
```

Inference predictions are saved to `<OUT_DIR>/predictions.json`, set in `scripts/run_squad.sh` or `scripts/run_glue.sh` as follows:
```
OUT_DIR=${10:-"/workspace/bert/results/SQuAD"} # For SQuAD.
# Or...
out_dir=${5:-"/workspace/bert/results/MRPC"} # For MRPC.
# Or...
out_dir=${5:-"/workspace/bert/results/SST-2"} # For SST-2.
```

This repository contains a number of predefined configurations to run the SQuAD, GLUE, and pre-training tasks on NVIDIA DGX-1, NVIDIA DGX-2H, or NVIDIA DGX A100 nodes in `scripts/configs/squad_config.sh`, `scripts/configs/glue_config.sh`, and `scripts/configs/pretrain_config.sh`. For example, to use the default DGX A100 8 gpu config, run:
```
bash scripts/run_squad.sh $(source scripts/configs/squad_config.sh && dgxa100-80g_8gpu_fp16) # For the SQuAD v1.1 dataset.
bash scripts/run_glue.sh $(source scripts/configs/glue_config.sh && mrpc_dgxa100-80g_8gpu_fp16) # For the MRPC dataset.
bash scripts/run_glue.sh $(source scripts/configs/glue_config.sh && sst-2_dgxa100-80g_8gpu_fp16) # For the SST-2 dataset. bash scripts/run_pretraining.sh $(source scripts/configs/pretrain_config.sh && dgxa100-80g_8gpu_fp16) # For pre-training ``` ## Advanced The following sections provide greater details of the dataset, running training and inference, and the training results. ### Scripts and sample code Descriptions of the key scripts and folders are provided below. - `data/` - Contains scripts for downloading and preparing individual datasets and will contain downloaded and processed datasets. - `scripts/` - Contains shell scripts to launch data download, pre-training, and fine-tuning. - `run_squad.sh` - Interface for launching question answering fine-tuning with `run_squad.py`. - `run_glue.sh` - Interface for launching paraphrase detection and sentiment analysis fine-tuning with `run_glue.py`. - `run_pretraining.sh` - Interface for launching BERT pre-training with `run_pretraining.py`. - `create_pretraining_data.py` - Creates `.hdf5` files from shared text files in the final step of dataset creation. - `model.py` - Implements the BERT pre-training and fine-tuning model architectures with PyTorch. - `optimization.py` - Implements the LAMB optimizer with PyTorch. - `run_squad.py` - Implements fine-tuning training and evaluation for question answering on the [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) dataset. - `run_glue.py` - Implements fine-tuning training and evaluation for [GLUE](https://gluebenchmark.com/) tasks. - `run_pretraining.py` - Implements BERT pre-training. ### Parameters #### Pre-training parameters BERT is designed to pre-train deep bidirectional networks for language representations. The following scripts replicate pre-training on Wikipedia from this [paper](https://arxiv.org/pdf/1810.04805.pdf). These scripts are general and can be used for pre-training language representations on any corpus of choice. The complete list of the available parameters for the `run_pretraining.py` script is : ``` --input_dir INPUT_DIR - The input data directory. Should contain .hdf5 files for the task. --config_file CONFIG_FILE - Path to a json file describing the BERT model configuration. This file configures the model architecture, such as the number of transformer blocks, number of attention heads, etc. --bert_model BERT_MODEL - Specifies the type of BERT model to use; should be one of the following: bert-base-uncased bert-large-uncased bert-base-cased bert-base-multilingual bert-base-chinese --output_dir OUTPUT_DIR - Path to the output directory where the model checkpoints will be written. --init_checkpoint - Initial checkpoint to start pre-training from (Usually a BERT pre-trained checkpoint) --max_seq_length MAX_SEQ_LENGTH - The maximum total input sequence length after WordPiece tokenization. Sequences longer than this will be truncated, and sequences shorter than this will be padded. --max_predictions_per_seq MAX_PREDICTIONS_PER_SEQ - The maximum total of masked tokens per input sequence for Masked LM. --train_batch_size TRAIN_BATCH_SIZE - Batch size per GPU for training. --learning_rate LEARNING_RATE - The initial learning rate for the LAMB optimizer. --max_steps MAX_STEPS - Total number of training steps to perform. --warmup_proportion WARMUP_PROPORTION - Proportion of training to perform linear learning rate warmup for. For example, 0.1 = 10% of training. --seed SEED - Sets the seed to use for random number generation. 
--gradient_accumulation_steps GRADIENT_ACCUMULATION_STEPS - Number of update steps to accumulate before performing a backward/update pass. --allreduce_post_accumulation - If set to true, performs allreduce only after the defined number of gradient accumulation steps. --allreduce_post_accumulation_fp16 - If set to true, performs allreduce after gradient accumulation steps in FP16. --amp or --fp16 - If set, performs computations using automatic mixed precision. --loss_scale LOSS_SCALE - Sets the loss scaling value to use when mixed precision is used. The default value (0) tells the script to use dynamic loss scaling instead of fixed loss scaling. --log_freq LOG_FREQ - If set, the script outputs the training loss every LOG_FREQ step. --resume_from_checkpoint - If set, training resumes from a checkpoint that currently exists in OUTPUT_DIR. --num_steps_per_checkpoint NUM_STEPS_PER_CHECKPOINT - Number of update steps until a model checkpoint is saved to disk. --phase2 - Specified if training on phase 2 only. If not specified, default pre-training is on phase 1. --phase1_end_step - The number of steps phase 1 was trained for. In order to resume phase 2 the correct way; phase1_end_step should correspond to the --max_steps phase 1 was trained for. ``` #### Fine tuning parameters * SQuAD Default arguments are listed below in the order `scripts/run_squad.sh` expects: - Initial checkpoint - The default is `/workspace/checkpoints/bert_uncased.pt`. - Number of training Epochs - The default is `2`. - Batch size - The default is `3`. - Learning rate - The default is `3e-5`. - Precision (either `fp16`, `tf32` or `fp32`) - The default is `fp16`. - Number of GPUs - The default is `8`. - Seed - The default is `1`. - SQuAD directory - The default is `/workspace/bert/data/v1.1`. - Vocabulary file (token to ID mapping) - The default is `/workspace/bert/vocab/vocab`. - Output directory for results - The default is `/results/SQuAD`. - Mode (`train`, `eval`, `train eval`, `predict`) - The default is `train`. - Config file for the BERT model (It should be the same as the pre-trained model) - The default is `/workspace/bert/bert_config.json`. The script saves the final checkpoint to the `/results/SQuAD/pytorch_model.bin` file. * GLUE Default arguments are listed below in the order `scripts/run_glue.sh` expects: - Initial checkpoint - The default is `/workspace/bert/checkpoints/bert_uncased.pt`. - Data directory - The default is `/workspace/bert/data/download/glue/MRPC/`. - Vocabulary file (token to ID mapping) - The default is `/workspace/bert/vocab/vocab`. - Config file for the BERT model (It should be the same as the pre-trained model) - The default is `/workspace/bert/bert_config.json`. - Output directory for result - The default is `/workspace/bert/results/MRPC`. - The name of the GLUE task (`mrpc` or `sst-2`) - The default is `mrpc` - Number of GPUs - The default is `8`. - Batch size per GPU - The default is `16`. - Number of update steps to accumulate before performing a backward/update pass (this option effectively normalizes the GPU memory footprint down by the same factor) - The default is `1`. - Learning rate - The default is `2.4e-5`. - The proportion of training samples used to warm up the learning rate - The default is `0.1`. - Number of training Epochs - The default is `3`. - Total number of training steps to perform - The default is `-1.0`, which means it is determined by the number of epochs. - Precision (either `fp16`, `tf32` or `fp32`) - The default is `fp16`. - Seed - The default is `2`. 
- Mode (`train`, `eval`, `prediction`, `train eval`, `train prediction`, `eval prediction`, `train eval prediction`) - The default is `train eval`. #### Multi-node Multi-node runs can be launched on a pyxis/enroot Slurm cluster (refer to [Requirements](#requirements)) with the `run.sub` script with the following command for a 4-node DGX-1 example for both phase 1 and phase 2: ``` BATCHSIZE=2048 LR=6e-3 GRADIENT_STEPS=128 PHASE=1 sbatch -N4 --ntasks-per-node=8 run.sub BATCHSIZE=1024 LR=4e-3 GRADIENT_STEPS=256 PHASE=2 sbatch -N4 --ntasks-per-node=8 run.sub ``` Checkpoints after phase 1 will be saved in `checkpointdir` specified in `run.sub`. The checkpoint will be automatically picked up to resume training on phase 2. Note that phase 2 should be run after phase 1. Variables to re-run the [Training performance results](#training-performance-results) are available in the `configurations.yml` file. The batch variables `BATCHSIZE`, `LR`, `GRADIENT_STEPS`,`PHASE` refer to the Python arguments `train_batch_size`, `learning_rate`, `gradient_accumulation_steps`, `phase2` respectively. Note that the `run.sub` script is a starting point that has to be adapted depending on the environment. In particular, variables such as `datadir` handle the location of the files for each phase. Refer to the file’s contents to find the full list of variables to adjust for your system. ### Command-line options To view the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example: `python run_pretraining.py --help` `python run_squad.py --help` `python run_glue.py --help` Detailed descriptions of command-line options can be found in the [Parameters](#parameters) section. ### Getting the data For pre-training BERT, we use the Wikipedia (2500M words) dataset. We extract only the text passages and ignore headers, lists, and tables. BERT requires that datasets are structured as a document level corpus rather than a shuffled sentence-level corpus because it is critical to extract long contiguous sentences. `data/create_datasets_from_start.sh` uses the LDDL downloader to download the Wikipedia dataset, and `scripts/run_pretraining.sh` uses the LDDL preprocessor and load balancer to preprocess the Wikipedia dataset into Parquet shards which are then streamed during the pre-training by the LDDL data loader. Refer to [LDDL's README](../../../Tools/lddl/README.md) for more information on how to use LDDL. Depending on the speed of your internet connection, downloading and extracting the Wikipedia dataset takes a few hours, and running the LDDL preprocessor and load balancer takes half an hour on a single DGXA100 node. For fine-tuning a pre-trained BERT model for specific tasks, by default, this repository prepares the following dataset: - [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/): for question answering - [MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398): for paraphrase detection. - [SST-2](https://nlp.stanford.edu/sentiment/index.html): for sentiment analysis. #### Dataset guidelines The procedure to prepare a text corpus for pre-training is described in the above section. This section provides additional insight into how exactly raw text is processed so that it is ready for pre-training. First, raw text is tokenized using [WordPiece tokenization](https://arxiv.org/pdf/1609.08144.pdf). A [CLS] token is inserted at the start of every sequence, and the two sentences in the sequence are separated by a [SEP] token. 
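As a rough, self-contained illustration (not the repository's actual preprocessing code), the sketch below packs a tokenized sentence pair into a `[CLS] A [SEP] B [SEP]` sequence with segment IDs and applies the 15% / 80-10-10 masking rule described in the next paragraphs; the token lists and mini-vocabulary are made up:

```
# Hypothetical sketch of building one Masked LM instance from a sentence pair.
import random

MASK, CLS, SEP = "[MASK]", "[CLS]", "[SEP]"
random_vocab = ["apple", "city", "run", "blue"]   # stand-in for the WordPiece vocab

def build_instance(tokens_a, tokens_b, mask_prob=0.15):
    tokens = [CLS] + tokens_a + [SEP] + tokens_b + [SEP]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    masked_positions, masked_labels = [], []
    for i, tok in enumerate(tokens):
        # Each non-special token is selected for prediction with 15% probability.
        if tok in (CLS, SEP) or random.random() > mask_prob:
            continue
        masked_positions.append(i)
        masked_labels.append(tok)
        r = random.random()
        if r < 0.8:
            tokens[i] = MASK                         # 80%: replace with [MASK]
        elif r < 0.9:
            tokens[i] = random.choice(random_vocab)  # 10%: replace with a random token
        # remaining 10%: keep the original token
    return tokens, segment_ids, masked_positions, masked_labels

print(build_instance(["my", "dog", "barks"], ["it", "is", "loud"]))
```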
Note: BERT pre-training looks at pairs of sentences at a time. A sentence embedding token [A] is added to the first sentence and token [B] to the next. BERT pre-training optimizes for two unsupervised classification tasks. The first is Masked Language Modeling (Masked LM). One training instance of Masked LM is a single modified sentence. Each token in the sentence has a 15% chance of being replaced by a [MASK] token. The chosen token is replaced with [MASK] 80% of the time, 10% with a random token and the remaining 10% the token is retained. The task is then to predict the original token. The second task is next sentence prediction. One training instance of BERT pre-training is two sentences (a sentence pair). A sentence pair may be constructed by simply taking two adjacent sentences from a single document or by pairing up two random sentences with equal probability. The goal of this task is to predict whether or not the second sentence followed the first in the original document. ### Training process The training process consists of two steps: pre-training and fine-tuning. #### Pre-training Pre-training is performed using the `run_pretraining.py` script along with parameters defined in the `scripts/run_pretraining.sh`. The `run_pretraining.sh` script runs a job on a single node that trains the BERT-large model from scratch using the Wikipedia dataset as training data using the LAMB optimizer. By default, the training script runs two phases of training with a hyperparameter recipe specific to 8x V100 32G cards: Phase 1: (Maximum sequence length of 128) - Runs on 8 GPUs with a training batch size of 64 per GPU - Uses a learning rate of 6e-3 - Has FP16 precision enabled - Runs for 7038 steps, where the first 28.43% (2000) are warm-up steps - Saves a checkpoint every 200 iterations (keeps only the latest three checkpoints) and at the end of training. All checkpoints and training logs are saved to the `/results` directory (in the container which can be mounted to a local directory). - Creates a log file containing all the output Phase 2: (Maximum sequence length of 512) - Runs on 8 GPUs with a training batch size of 8 per GPU - Uses a learning rate of 4e-3 - Has FP16 precision enabled - Runs for 1563 steps, where the first 12.8% are warm-up steps - Saves a checkpoint every 200 iterations (keeps only the latest three checkpoints) and at the end of training. All checkpoints and training logs are saved to the `/results` directory (in the container which can be mounted to a local directory). - Creates a log file containing all the output These parameters will train on the Wikipedia dataset to state-of-the-art accuracy on a DGX-1 with 32GB V100 cards. `bash run_pretraining.sh <training_batch_size> <learning-rate> <precision> <num_gpus> <warmup_proportion> <training_steps> <save_checkpoint_steps> <resume_training> <create_logfile> <accumulate_gradients> <gradient_accumulation_steps> <seed> <job_name> <allreduce_post_accumulation> <allreduce_post_accumulation_fp16> <accumulate_into_fp16> <train_bath_size_phase2> <learning_rate_phase2> <warmup_proportion_phase2> <train_steps_phase2> <gradient_accumulation_steps_phase2> ` Where: - `<training_batch_size>` is per-GPU batch size used for training. Larger batch sizes run more efficiently but require more memory. - `<learning_rate>` is the base learning rate for training - `<precision>` is the type of math in your model, which can be either `fp32` or `fp16`. The options mean: - FP32: 32-bit IEEE single precision floats. 
- FP16: Mixed precision 16 and 32-bit floats. - `<num_gpus>` is the number of GPUs to use for training. Must be equal to or smaller than the number of GPUs attached to your node. - `<warmup_proportion>` is the percentage of training steps used for warm-up at the start of training. - `<training_steps>` is the total number of training steps. - `<save_checkpoint_steps>` controls how often checkpoints are saved. - `<resume_training>` if set to `true`, training should resume from the latest model in `/results/checkpoints`. Default is `false`. - `<create_logfile>` a flag indicating if output should be written to a log file or not (acceptable values are `true` or 'false`. `true` indicates output should be saved to a log file.) - `<accumulate_gradient>` a flag indicating whether a larger batch should be simulated with gradient accumulation. - `<gradient_accumulation_steps>` an integer indicating the number of steps to accumulate gradients over. Effective batch size = `training_batch_size` / `gradient_accumulation_steps`. - `<seed>` random seed for the run. - `<allreduce_post_accumulation>` - If set to `true`, performs `allreduce` only after the defined number of gradient accumulation steps. - `<allreduce_post_accumulation_fp16>` - If set to `true`, performs `allreduce` after gradient accumulation steps in FP16. Note: The above two options need to be set to false when running either TF32 or FP32. - `<training_batch_size_phase2>` is per-GPU batch size used for training in phase 2. Larger batch sizes run more efficiently but require more memory. - `<learning_rate_phase2>` is the base learning rate for training phase 2. - `<warmup_proportion_phase2>` is the percentage of training steps used for warm-up at the start of training. - `<training_steps_phase2>` is the total number of training steps for phase 2, to be continued in addition to phase 1. - `<gradient_accumulation_steps_phase2>` an integer indicating the number of steps to accumulate gradients over in phase 2. Effective batch size = `training_batch_size_phase2` / `gradient_accumulation_steps_phase2`. - `<init_checkpoint>` A checkpoint to start the pre-training routine on (Usually a BERT pre-trained checkpoint). For example: `bash scripts/run_pretraining.sh` Trains BERT-large from scratch on a DGX-1 32G using FP16 arithmetic. 90% of the training steps are done with sequence length 128 (phase 1 of training), and 10% of the training steps are done with sequence length 512 (phase 2 of training). To train on a DGX-1 16G, set `gradient_accumulation_steps` to `512` and `gradient_accumulation_steps_phase2` to `1024` in `scripts/run_pretraining.sh`. To train on a DGX-2 32G, set `train_batch_size` to `4096`, `train_batch_size_phase2` to `2048`, `num_gpus` to `16`, `gradient_accumulation_steps` to `64` and `gradient_accumulation_steps_phase2` to `256` in `scripts/run_pretraining.sh` In order to run a pre-training routine on an initial checkpoint, perform the following in `scripts/run_pretraining.sh`: - point the `init_checkpoint` variable to the location of the checkpoint - set `resume_training` to `true` - Note: The parameter value assigned to `BERT_CONFIG` during training should remain unchanged. Also, to resume pre-training on your corpus of choice, the training dataset should be created using the same vocabulary file used in `data/create_datasets_from_start.sh`. #### Fine-tuning Fine-tuning is provided for a variety of tasks. 
The following tasks are included with this repository through the following scripts: - Question Answering (`scripts/run_squad.sh`) - Paraphrase Detection and Sentiment Analysis (`script/run_glue.sh`) By default, each Python script implements fine-tuning a pre-trained BERT model for a specified number of training epochs as well as evaluation of the fine-tuned model. Each shell script invokes the associated Python script with the following default parameters: - Uses 8 GPUs - Has FP16 precision enabled - Saves a checkpoint at the end of training to the `results/<dataset_name>` folder Fine-tuning Python scripts implement support for mixed precision and multi-GPU training through NVIDIA’s [APEX](https://github.com/NVIDIA/apex) library. For a full list of parameters and associated explanations, refer to the [Parameters](#parameters) section. The fine-tuning shell scripts have positional arguments outlined below: ``` # For SQuAD. bash scripts/run_squad.sh <checkpoint_to_load> <epochs> <batch_size per GPU> <learning rate> <precision (either `fp16` or `fp32`)> <number of GPUs to use> <seed> <SQuAD_DATA_DIR> <VOCAB_FILE> <OUTPUT_DIR> <mode (either `train`, `eval` or `train eval`)> <CONFIG_FILE> # For GLUE bash scripts/run_glue.sh <checkpoint_to_load> <data_directory> <vocab_file> <config_file> <out_dir> <task_name> <number of GPUs to use> <batch size per GPU> <gradient_accumulation steps> <learning_rate> <warmup_proportion> <epochs> <precision (either `fp16` or `fp32` or `tf32`)> <seed> <mode (either `train`, `eval`, `prediction`, `train eval`, `train prediction`, `eval prediction` or `train eval prediction`)> ``` By default, the mode positional argument is set to train eval. Refer to the [Quick Start Guide](#quick-start-guide) for explanations of each positional argument. Note: The first positional argument (the path to the checkpoint to load) is required. Each fine-tuning script assumes that the corresponding dataset files exist in the `data/` directory or separate path can be a command-line input to `run_squad.sh`. ### Inference process Fine-tuning inference can be run in order to obtain predictions on fine-tuning tasks, for example, Q&A on SQuAD. #### Fine-tuning inference Evaluation fine-tuning is enabled by the same scripts as training: - Question Answering (`scripts/run_squad.sh`) - Paraphrase Detection and Sentiment Analysis (`scripts/run_glue.sh`) The mode positional argument of the shell script is used to run in evaluation mode. The fine-tuned BERT model will be run on the evaluation dataset, and the evaluation loss and accuracy will be displayed. Each inference shell script expects dataset files to exist in the same locations as the corresponding training scripts. The inference scripts can be run with default settings. By setting the `mode` variable in the script to either `eval` or `prediction` flag, you can choose between running predictions and evaluating them on a given dataset or just obtain the model predictions. `bash scripts/run_squad.sh <path to fine-tuned model checkpoint>` `bash scripts/run_glue.sh <path to fine-tuned model checkpoint>` For SQuAD, to run inference interactively on question-context pairs, use the script `inference.py` as follows: `python inference.py --bert_model "bert-large-uncased" --init_checkpoint=<fine_tuned_checkpoint> --config_file="bert_config.json" --vocab_file=<path to vocab file> --question="What food does Harry like?" --context="My name is Harry and I grew up in Canada. 
I love apples."` ### Deploying BERT using NVIDIA Triton Inference Server The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server) provides a cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or GRPC endpoint, allowing remote clients to request inferencing for any model being managed by the server. More information on how to perform inference using NVIDIA Triton Inference Server can be found in [triton/README.md](./triton/README.md). ## Performance ### Benchmarking The following section shows how to run benchmarks measuring the model performance in training and inference modes. #### Training performance benchmark Training performance benchmarks for pre-training can be obtained by running `scripts/run_pretraining.sh`, and for fine-tuning can be obtained by running `scripts/run_squad.sh` or `scripts/run_glue.sh` for SQuAD or GLUE, respectively. The required parameters can be passed through the command-line as described in [Training process](#training-process). As an example, to benchmark the training performance on a specific batch size for SQuAD, run: `bash scripts/run_squad.sh <pre-trained checkpoint path> <epochs> <batch size> <learning rate> <fp16|fp32> <num_gpus> <seed> <path to SQuAD dataset> <path to vocab set> <results directory> train <BERT config path] <max steps>` An example call used to generate throughput numbers: `bash scripts/run_squad.sh /workspace/bert/bert_large_uncased.pt 2.0 4 3e-5 fp16 8 42 /workspace/bert/squad_data /workspace/bert/scripts/vocab/vocab /results/SQuAD train /workspace/bert/bert_config.json -1` #### Inference performance benchmark Inference performance benchmarks for both fine-tuning can be obtained by running `scripts/run_squad.sh` and `scripts/run_glue.sh` respectively. The required parameters can be passed through the command-line as described in [Inference process](#inference-process). As an example, to benchmark the inference performance on a specific batch size for SQuAD, run: `bash scripts/run_squad.sh <pre-trained checkpoint path> <epochs> <batch size> <learning rate> <fp16|fp32> <num_gpus> <seed> <path to SQuAD dataset> <path to vocab set> <results directory> eval <BERT config path> <max steps>` An example call used to generate throughput numbers: `bash scripts/run_squad.sh /workspace/bert/bert_large_uncased.pt 2.0 4 3e-5 fp16 8 42 /workspace/bert/squad_data /workspace/bert/scripts/vocab/vocab /results/SQuAD eval /workspace/bert/bert_config.json -1` ### Results The following sections provide details on how we achieved our performance and accuracy in training and inference. #### Training accuracy results Our results were obtained by running the `scripts/run_squad.sh` and `scripts/run_pretraining.sh` training scripts in the pytorch:21.11-py3 NGC container unless otherwise specified. 
##### Pre-training loss results: NVIDIA DGX A100 (8x A100 80GB) | DGX System | GPUs / Node | Batch size / GPU (Phase 1 and Phase 2) | Accumulated Batch size / GPU (Phase 1 and Phase 2) | Accumulation steps (Phase 1 and Phase 2) | Final Loss - TF32 | Final Loss - mixed precision | Time to train(hours) - TF32 | Time to train(hours) - mixed precision | Time to train speedup (TF32 to mixed precision) | |--------------------|-------------|----------------------------------------------------|------------------------------------------|-------------------|------------------------------|-----------------------------|----------------------------------------|-------------------------------------------------|-----| | 32 x DGX A100 80GB | 8 | 256 and 32 | 256 and 128 | 1 and 4 | --- | 1.2437 | --- | 1.2 | 1.9 | | 32 x DGX A100 80GB | 8 | 128 and 16 | 256 and 128 | 2 and 8 | 1.2465 | --- | 2.4 | --- | --- | ##### Pre-training loss curves ![Pre-training Loss Curves](images/loss_curves.png) ##### Fine-tuning accuracy results: NVIDIA DGX A100 (8x A100 80GB) * SQuAD | GPUs | Batch size / GPU (TF32 and FP16) | Accuracy - TF32(% F1) | Accuracy - mixed precision(% F1) | Time to train(hours) - TF32 | Time to train(hours) - mixed precision | Time to train speedup (TF32 to mixed precision) | |------|----------------------------------|-----------------------|----------------------------------|-----------------------------|----------------------------------------|-------------------------------------------------| | 8 | 32 | 90.93 | 90.96 | 0.102 | 0.0574 | 1.78 | * MRPC | GPUs | Batch size / GPU (TF32 and FP16) | Accuracy - TF32(%) | Accuracy - mixed precision(%) | Time to train(seconds) - TF32 | Time to train(seconds) - mixed precision | Time to train speedup (TF32 to mixed precision) | |------|----------------------------------|--------------------|-------------------------------|-------------------------------|------------------------------------------|-------------------------------------------------| | 8 | 16 | 87.25 | 88.24 | 17.26 | 7.31 | 2.36 | * SST-2 | GPUs | Batch size / GPU (TF32 and FP16) | Accuracy - TF32(%) | Accuracy - mixed precision(%) | Time to train(seconds) - TF32 | Time to train(seconds) - mixed precision | Time to train speedup (TF32 to mixed precision) | |------|----------------------------------|--------------------|-------------------------------|-------------------------------|------------------------------------------|-------------------------------------------------| | 8 | 128 | 91.97 | 92.78 | 119.28 | 62.59 | 1.91 | ##### Training stability test ###### Pre-training stability test | Accuracy Metric | Seed 0 | Seed 1 | Seed 2 | Seed 3 | Seed 4 | Mean | Standard Deviation | |-----------------|--------|--------|--------|--------|--------|-------|--------------------| | Final Loss | 1.260 | 1.265 | 1.304 | 1.256 | 1.242 | 1.265 | 0.023 | ###### Fine-tuning stability test * SQuAD Training stability with 8 GPUs, FP16 computations, batch size of 4: | Accuracy Metric | Seed 0 | Seed 1 | Seed 2 | Seed 3 | Seed 4 | Seed 5 | Seed 6 | Seed 7 | Seed 8 | Seed 9 | Mean | Standard Deviation | |-----------------|--------|--------|--------|--------|--------|--------|--------|--------|--------|--------|-------|--------------------| | Exact Match % | 83.64 | 84.05 | 84.51 | 83.69 | 83.87 | 83.94 | 84.27 | 83.97 | 83.75 | 83.92 | 83.96 | 0.266 | | f1 % | 90.60 | 90.65 | 90.96 | 90.44 | 90.58 | 90.78 | 90.81 | 90.82 | 90.51 | 90.68 | 90.68 | 0.160 | * MRPC Training stability with 8 A100 GPUs, FP16 
computations, batch size of 16 per GPU: | Accuracy Metric | Seed 0 | Seed 1 | Seed 2 | Seed 3 | Seed 4 | Seed 5 | Seed 6 | Seed 7 | Seed 8 | Seed 9 | Mean | Standard Deviation | |-----------------|--------|--------|--------|--------|--------|--------|--------|--------|--------|--------|-------|--------------------| | Exact Match % | 85.78 | 85.54 | 84.56 | 86.27 | 84.07 | 86.76 | 87.01 | 85.29 | 88.24 | 86.52 | 86.00 | 1.225 | > Note: Since MRPC is a very small dataset where overfitting can often occur, the resulting validation accuracy can often have high variance. By repeating the above experiments for 100 seeds, the max accuracy is 88.73, and the average accuracy is 82.56 with a standard deviation of 6.01. * SST-2 Training stability with 8 A100 GPUs, FP16 computations, batch size of 128 per GPU: | Accuracy Metric | Seed 0 | Seed 1 | Seed 2 | Seed 3 | Seed 4 | Seed 5 | Seed 6 | Seed 7 | Seed 8 | Seed 9 | Mean | Standard Deviation | |-----------------|--------|--------|--------|--------|--------|--------|--------|--------|--------|--------|-------|--------------------| | Exact Match % | 91.86 | 91.28 | 91.86 | 91.74 | 91.28 | 91.86 | 91.40 | 91.97 | 91.40 | 92.78 | 91.74 | 0.449 | #### Training performance results ##### Training performance: NVIDIA DGX A100 (8x A100 80GB) Our results were obtained by running the `scripts run_pretraining.sh` training script in the pytorch:21.11-py3 NGC container on NVIDIA DGX A100 (8x A100 80GB) GPUs. Performance numbers (in items/images per second) were averaged over a few training iterations. ###### Pre-training NVIDIA DGX A100 (8x A100 80GB) | GPUs | Batch size / GPU (TF32 and FP16) | Accumulated Batch size / GPU (TF32 and FP16) | Accumulation steps (TF32 and FP16) | Sequence length | Throughput - TF32(sequences/sec) | Throughput - mixed precision(sequences/sec) | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision | |------|----------------------------------|------------------------------------|-----------------|----------------------------------|---------------------------------------------|---------------------------------------------|---------------------|--------------------------------|----| | 1 | 128 and 256 | 8192 and 8192 | 64 and 32 | 128 | 317 | 580 | 1.83 | 1.00 | 1.00 | | 8 | 128 and 256 | 8192 and 8192 | 64 and 32 | 128 | 2505 | 4591 | 1.83 | 7.90 | 7.91 | | 1 | 16 and 32 | 4096 and 4096 | 256 and 128 | 512 | 110 | 210 | 1.90 | 1.00 | 1.00 | | 8 | 16 and 32 | 4096 and 4096 | 256 and 128 | 512 | 860 | 1657 | 1.92 | 7.81 | 7.89 | ###### Pre-training NVIDIA DGX A100 (8x A100 80GB) Multi-node Scaling | Nodes | GPUs / node | Batch size / GPU (TF32 and FP16) | Accumulated Batch size / GPU (TF32 and FP16) | Accumulation steps (TF32 and FP16) | Sequence length | Mixed Precision Throughput | Mixed Precision Strong Scaling | TF32 Throughput | TF32 Strong Scaling | Speedup (Mixed Precision to TF32) | |-------|-------------|----------------------------------|------------------------------------|-----------------|----------------------------|--------------------------------|-----------------|---------------------|-----------------------------------|-----| | 1 | 8 | 126 and 256 | 8192 and 8192 | 64 and 32 | 128 | 4553 | 1 | 2486 | 1 | 1.83 | | 2 | 8 | 126 and 256 | 4096 and 4096 | 32 and 16 | 128 | 9191 | 2.02 | 4979 | 2.00 | 1.85 | | 4 | 8 | 126 and 256 | 2048 and 2048 | 16 and 18 | 128 | 18119 | 3.98 | 9859 | 3.97 | 1.84 | | 8 | 8 | 126 and 256 | 1024 and 1024 | 8 and 4 | 128 | 35774 | 7.86 | 19815 | 7.97 
| 1.81 | | 16 | 8 | 126 and 256 | 512 and 512 | 4 and 2 | 128 | 70555 | 15.50 | 38866 | 15.63 | 1.82 | | 32 | 8 | 126 and 256 | 256 and 256 | 2 and 1 | 128 | 138294 | 30.37 | 75706 | 30.45 | 1.83 | | 1 | 8 | 16 and 32 | 4096 and 4096 | 256 and 128 | 512 | 1648 | 1 | 854 | 1 | 1.93 | | 2 | 8 | 16 and 32 | 2048 and 2048 | 128 and 64 | 512 | 3291 | 2.00 | 1684 | 1.97 | 1.95 | | 4 | 8 | 16 and 32 | 1024 and 1024 | 64 and 32 | 512 | 6464 | 3.92 | 3293 | 3.86 | 1.96 | | 8 | 8 | 16 and 32 | 512 and 512 | 32 and 16 | 512 | 13005 | 7.89 | 6515 | 7.63 | 2.00 | | 16 | 8 | 16 and 32 | 256 and 256 | 16 and 8 | 512 | 25570 | 15.51 | 12131 | 14.21 | 2.11 | | 32 | 8 | 16 and 32 | 128 and 128 | 8 and 4 | 512 | 49663 | 30.13 | 21298 | 24.95 | 2.33 | ###### Fine-tuning NVIDIA DGX A100 (8x A100 80GB) * SQuAD | GPUs | Batch size / GPU (TF32 and FP16) | Throughput - TF32(sequences/sec) | Throughput - mixed precision(sequences/sec) | Throughput speedup (TF32 - mixed precision) | Weak scaling - TF32 | Weak scaling - mixed precision | |------|----------------------------------|----------------------------------|---------------------------------------------|---------------------------------------------|---------------------|--------------------------------| | 1 | 32 and 32 | 61.5 | 110.5 | 1.79 | 1.00 | 1.00 | | 8 | 32 and 32 | 469.8 | 846.7 | 1.80 | 7.63 | 7.66 | ##### Training performance: NVIDIA DGX-1 (8x V100 32G) Our results were obtained by running the `scripts/run_pretraining.sh` and `scripts/run_squad.sh` training scripts in the pytorch:21.11-py3 NGC container on NVIDIA DGX-1 with (8x V100 32G) GPUs. Performance numbers (in sequences per second) were averaged over a few training iterations. ###### Pre-training NVIDIA DGX-1 With 32G | GPUs | Batch size / GPU (FP32 and FP16) | Accumulation steps (FP32 and FP16) | Sequence length | Throughput - FP32(sequences/sec) | Throughput - mixed precision(sequences/sec) | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision | |------|----------------------------------|------------------------------------|-----------------|----------------------------------|---------------------------------------------|---------------------------------------------|---------------------|--------------------------------| | 1 | 4096 and 4096 | 128 and 64 | 128 | 50 | 224 | 4.48 | 1.00 | 1.00 | | 8 | 4096 and 4096 | 128 and 64 | 128 | 387 | 1746 | 4.51 | 7.79 | 7.79 | | 1 | 2048 and 2048 | 512 and 256 | 512 | 19 | 75 | 3.94 | 1.00 | 1.00 | | 8 | 2048 and 2048 | 512 and 256 | 512 | 149.6 | 586 | 3.92 | 7.87 | 7.81 | ###### Fine-tuning NVIDIA DGX-1 With 32G * SQuAD | GPUs | Batch size / GPU (FP32 and FP16) | Throughput - FP32(sequences/sec) | Throughput - mixed precision(sequences/sec) | Throughput speedup (FP32 - mixed precision) | Weak scaling - FP32 | Weak scaling - mixed precision | |------|----------------------------------|----------------------------------|---------------------------------------------|---------------------------------------------|---------------------|--------------------------------| | 1 | 8 and 16 | 12 | 52 | 4.33 | 1.00 | 1.00 | | 8 | 8 and 16 | 85.5 | 382 | 4.47 | 7.12 | 7.34 | To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). #### Inference performance results ##### Inference performance: NVIDIA DGX A100 (1x A100 80GB) Our results were obtained by running `scripts/run_squad.sh` in the pytorch:21.11-py3 NGC container on NVIDIA DGX A100 with (1x A100 80G) GPUs. 
###### Fine-tuning inference on NVIDIA DGX A100 (1x A100 80GB) * SQuAD | GPUs | Batch Size \(TF32/FP16\) | Sequence Length | Throughput \- TF32\(sequences/sec\) | Throughput \- Mixed Precision\(sequences/sec\) | |------|--------------------------|-----------------|-------------------------------------|------------------------------------------------| | 1 | 32/32 | 384 | 216 | 312 | To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). The inference performance metrics used were sequences/second. ## Release notes ### Changelog January 2022 - Knowledge Distillation support - Pre-training with native AMP, native DDP, and TorchScript with NVFuser backend - Pre-training using [Language Datasets and Data Loaders (LDDL)](../../../Tools/lddl) - Binned pretraining for phase2 with LDDL using a bin size of 64 July 2020 - Updated accuracy and performance tables to include A100 results - Fine-tuning with the MRPC and SST-2 datasets. March 2020 - TRITON Inference Server support. February 2020 - Integrate DLLogger. November 2019 - Use LAMB from APEX. - Code cleanup. - Bug fix in BertAdam optimizer. September 2019 - Scripts to support a multi-node launch. - Update pre-training loss results based on the latest data preparation scripts. August 2019 - Pre-training support with LAMB optimizer. - Updated Data download and Preprocessing. July 2019 - Initial release. ### Known issues There are no known issues with this model.
TensorFlow/Classification/ConvNets/model/blocks
blocks
conv2d_block
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import tensorflow as tf

from model import layers

__all__ = ['conv2d_block']


def conv2d_block(
    inputs,
    n_channels,
    kernel_size=(3, 3),
    strides=(2, 2),
    mode='SAME',
    use_batch_norm=True,
    activation='relu',
    is_training=True,
    data_format='NHWC',
    conv2d_hparams=None,
    batch_norm_hparams=None,
    name='conv2d',
    cardinality=1,
):
    """Conv2D -> (optional) batch norm -> activation block."""

    if not isinstance(conv2d_hparams, tf.contrib.training.HParams):
        raise ValueError("The parameter `conv2d_hparams` is not of type `HParams`")

    if not isinstance(batch_norm_hparams, tf.contrib.training.HParams) and use_batch_norm:
        raise ValueError("The parameter `batch_norm_hparams` is not of type `HParams`")

    with tf.variable_scope(name):

        if cardinality == 1:
            # Standard convolution.
            net = layers.conv2d(
                inputs,
                n_channels=n_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=mode,
                data_format=data_format,
                use_bias=not use_batch_norm,
                trainable=is_training,
                kernel_initializer=conv2d_hparams.kernel_initializer,
                bias_initializer=conv2d_hparams.bias_initializer)
        else:
            # Grouped convolution with `cardinality` groups.
            group_filter = tf.get_variable(
                name=name + 'group_filter',
                shape=[3, 3, n_channels // cardinality, n_channels],
                trainable=is_training,
                dtype=tf.float32)
            net = tf.nn.conv2d(inputs, group_filter, strides=strides, padding='SAME', data_format=data_format)

        if use_batch_norm:
            net = layers.batch_norm(
                net,
                decay=batch_norm_hparams.decay,
                epsilon=batch_norm_hparams.epsilon,
                scale=batch_norm_hparams.scale,
                center=batch_norm_hparams.center,
                is_training=is_training,
                data_format=data_format,
                param_initializers=batch_norm_hparams.param_initializers
            )

        if activation == 'relu':
            net = layers.relu(net, name='relu')

        elif activation == 'tanh':
            net = layers.tanh(net, name='tanh')

        elif activation != 'linear' and activation is not None:
            raise KeyError('Invalid activation type: `%s`' % activation)

        return net
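
# Hypothetical usage sketch of `conv2d_block` above (TensorFlow 1.x). The
# HParams values, placeholder shape, and names below are illustrative
# assumptions for documentation purposes, not the repository's defaults.
if __name__ == '__main__':
    example_conv2d_hparams = tf.contrib.training.HParams(
        kernel_initializer=tf.variance_scaling_initializer(),
        bias_initializer=tf.zeros_initializer(),
    )
    example_batch_norm_hparams = tf.contrib.training.HParams(
        decay=0.9, epsilon=1e-5, scale=True, center=True, param_initializers=None,
    )

    images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
    net = conv2d_block(
        images, n_channels=32, kernel_size=(3, 3), strides=(2, 2),
        is_training=True, data_format='NHWC',
        conv2d_hparams=example_conv2d_hparams,
        batch_norm_hparams=example_batch_norm_hparams,
        name='stem_conv',
    )
    print(net)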
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/optim
optim
adam
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from collections.abc import Collection import torch import torch.distributed as dist import torch.optim from common.fairseq.optim.fp16_optimizer import FairseqOptimizer from common.fairseq.optim.fused_adam import get_fused_adam_class class FairseqAdam(FairseqOptimizer): """Adam optimizer for fairseq. Important note: this optimizer corresponds to the "AdamW" variant of Adam in its weight decay behavior. As such, it is most closely analogous to torch.optim.AdamW from PyTorch. """ def __init__(self, cfg, params): super().__init__(cfg) fused_adam_cls = get_fused_adam_class() use_fused_adam = ( not getattr(cfg, "use_old_adam", False) and fused_adam_cls is not None and torch.cuda.is_available() ) if use_fused_adam: self._optimizer = fused_adam_cls(params, **self.optimizer_config) else: self._optimizer = Adam(params, **self.optimizer_config) @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.cfg.lr[0] if isinstance(self.cfg.lr, Collection) else self.cfg.lr, "betas": eval(self.cfg.adam_betas) if isinstance(self.cfg.adam_betas, str) else self.cfg.adam_betas, "eps": self.cfg.adam_eps, "weight_decay": self.cfg.weight_decay, } def average_params(self): """Reduce Params is only used during BMUF distributed training.""" state_dict = self.optimizer.state_dict() total_gpus = float(dist.get_world_size()) for _, value in state_dict["state"].items(): value["exp_avg"] /= total_gpus value["exp_avg_sq"] /= total_gpus dist.all_reduce(value["exp_avg"], op=dist.ReduceOp.SUM) dist.all_reduce(value["exp_avg_sq"], op=dist.ReduceOp.SUM) class Adam(torch.optim.Optimizer): r"""Implements Adam algorithm. This implementation is modified from torch.optim.Adam based on: `Fixed Weight Decay Regularization in Adam` (see https://arxiv.org/abs/1711.05101) It has been proposed in `Adam: A Method for Stochastic Optimization`_. Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. 
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__( self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False, ): defaults = dict( lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad ) super(Adam, self).__init__(params, defaults) @property def supports_memory_efficient_fp16(self): return True @property def supports_flat_params(self): return True def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError( "Adam does not support sparse gradients, please consider SparseAdam instead" ) amsgrad = group.get("amsgrad", False) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p_data_fp32) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p_data_fp32) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state["max_exp_avg_sq"] = torch.zeros_like(p_data_fp32) else: state["exp_avg"] = state["exp_avg"].to(p_data_fp32) state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32) if amsgrad: state["max_exp_avg_sq"] = state["max_exp_avg_sq"].to( p_data_fp32 ) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] if amsgrad: max_exp_avg_sq = state["max_exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. of gradient denom = max_exp_avg_sq.sqrt().add_(group["eps"]) else: denom = exp_avg_sq.sqrt().add_(group["eps"]) bias_correction1 = 1 - beta1 ** state["step"] bias_correction2 = 1 - beta2 ** state["step"] step_size = group["lr"] * math.sqrt(bias_correction2) / bias_correction1 if group["weight_decay"] != 0: p_data_fp32.add_( p_data_fp32, alpha=-group["weight_decay"] * group["lr"] ) p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
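The Adam class above can also be driven directly, outside the FairseqAdam wrapper. The snippet below is a minimal standalone sketch of such use; the toy linear model, batch shape, and hyperparameter values are illustrative assumptions, not part of the source.
# Minimal standalone sketch of the Adam class defined above (toy model is an assumption).
import torch
import torch.nn as nn

model = nn.Linear(16, 4)
optimizer = Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), weight_decay=0.01)

inputs = torch.randn(8, 16)
loss = model(inputs).pow(2).mean()   # dummy objective just to produce gradients
loss.backward()
optimizer.step()                     # decoupled (AdamW-style) weight decay is applied inside step()
optimizer.zero_grad()
Because step() keeps its moment estimates in fp32 copies and writes the result back, the same loop also works when the parameters themselves are fp16, which is what supports_memory_efficient_fp16 advertises.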
PyTorch/Classification/GPUNet/configs
configs
model_hub
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import pathlib from pathlib import Path import shutil import urllib.request from typing import Any, Callable from zipfile import ZipFile from tqdm.auto import tqdm # Predefined model config files MODEL_ZOO_KEYS_B1_NGC = {} MODEL_ZOO_KEYS_B1_NGC["GV100"] = {} # GPUNet-0: 0.62ms on GV100 MODEL_ZOO_KEYS_B1_NGC["GV100"]["0.65ms"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_0_pyt_ckpt/versions/21.12.0_amp/zip" # GPUNet-1: 0.85ms on GV100 MODEL_ZOO_KEYS_B1_NGC["GV100"]["0.85ms"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_1_pyt_ckpt/versions/21.12.0_amp/zip" # GPUNet-2: 1.76ms on GV100 MODEL_ZOO_KEYS_B1_NGC["GV100"]["1.75ms"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_2_pyt_ckpt/versions/21.12.0_amp/zip" # GPUNet-D1: 1.25ms on GV100 MODEL_ZOO_KEYS_B1_NGC["GV100"]["1.25ms-D"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_d1_pyt_ckpt/versions/21.12.0_amp/zip" # GPUNet-D2: 2.25ms on GV100 MODEL_ZOO_KEYS_B1_NGC["GV100"]["2.25ms-D"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_d2_pyt_ckpt/versions/21.12.0_amp/zip" # GPUNet-P0: 0.5ms on GV100 MODEL_ZOO_KEYS_B1_NGC["GV100"]["0.5ms-D"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_p0_pyt_ckpt/versions/21.12.0_amp/zip" # GPUNet-P1: 0.8ms on GV100 MODEL_ZOO_KEYS_B1_NGC["GV100"]["0.8ms-D"] = "https://api.ngc.nvidia.com/v2/models/nvidia/dle/gpunet_p1_pyt_ckpt/versions/21.12.0_amp/zip" MODEL_ZOO_BATCH_NGC = { "1": MODEL_ZOO_KEYS_B1_NGC, } MODEL_ZOO_NAME2TYPE_B1 = {} MODEL_ZOO_NAME2TYPE_B1["GPUNet-0"] = "0.65ms" MODEL_ZOO_NAME2TYPE_B1["GPUNet-1"] = "0.85ms" MODEL_ZOO_NAME2TYPE_B1["GPUNet-2"] = "1.75ms" MODEL_ZOO_NAME2TYPE_B1["GPUNet-P0"] = "0.5ms-D" MODEL_ZOO_NAME2TYPE_B1["GPUNet-P1"] = "0.8ms-D" MODEL_ZOO_NAME2TYPE_B1["GPUNet-D1"] = "1.25ms-D" MODEL_ZOO_NAME2TYPE_B1["GPUNet-D2"] = "2.25ms-D" def get_model_list(batch: int = 1): """Get a list of models in model zoo.""" batch = str(batch) err_msg = "Batch {} is not yet optimized.".format(batch) assert batch in MODEL_ZOO_BATCH_NGC.keys(), err_msg return list(MODEL_ZOO_BATCH_NGC[batch].keys()) def get_configs( batch: int = 1, latency: str = "GPUNet_1ms", gpuType: str = "GV100", config_root_dir: str = "./configs", download: bool = True ): """Get file with model config (downloads if necessary).""" batch = str(batch) errMsg0 = "Batch {} not found, available batches are {}".format( batch, list(MODEL_ZOO_BATCH_NGC.keys()) ) assert batch in MODEL_ZOO_BATCH_NGC.keys(), errMsg0 availGPUs = list(MODEL_ZOO_BATCH_NGC[batch].keys()) errMsg1 = "GPU {} not found, available GPUs are {}".format(gpuType, availGPUs) assert gpuType in availGPUs, errMsg1 errMsg2 = "Latency {} not found, available Latencies are {}".format( latency, list(MODEL_ZOO_BATCH_NGC[batch][gpuType]) ) assert latency in MODEL_ZOO_BATCH_NGC[batch][gpuType].keys(), errMsg2 print("testing:", " batch=", batch, " latency=", latency, " gpu=", gpuType) configPath = 
config_root_dir + "/batch" + str(batch) configPath += "/" + gpuType + "/" + latency + ".json" checkpointPath = config_root_dir + "/batch" + str(batch) + "/" checkpointPath += gpuType + "/" ngcCheckpointPath = Path(checkpointPath) checkpointPath += latency + ".pth.tar" ngcUrl = MODEL_ZOO_BATCH_NGC[batch][gpuType][latency] if download: download_checkpoint_ngc(ngcUrl, ngcCheckpointPath) with open(configPath) as configFile: modelJSON = json.load(configFile) configFile.close() return modelJSON, checkpointPath def unzip(checkpoint_path: pathlib.Path, archive_path: pathlib.Path) -> None: """ Unzip acrhive to provided path Args: checkpoint_path: Path where archive has to be unpacked archive_path: Path to archive Archive filename Returns: None """ checkpoint_path.mkdir(parents=True, exist_ok=True) with ZipFile(archive_path, "r") as zf: zf.extractall(path=checkpoint_path) archive_path.unlink() def download_progress(t: Any) -> Callable: """ Progress bar Args: t: progress Returns: Callable """ last_b = [0] def update_to(b: int = 1, bsize: int = 1, tsize: int = None): if tsize not in (None, -1): t.total = tsize t.update((b - last_b[0]) * bsize) last_b[0] = b return update_to def download_checkpoint_ngc(checkpoint_url: str, checkpoint_path: pathlib.Path) -> None: """ Download checkpoint from given url to provided path Args: checkpoint_url: Url from which checkpoint has to be downloaded checkpoint_path: Path where checkpoint has to be stored Returns: None """ with tqdm(unit="B") as t: reporthook = download_progress(t) result = urllib.request.urlretrieve(checkpoint_url, reporthook=reporthook) filename = result[0] file_path = pathlib.Path(filename) assert file_path.is_file() or file_path.is_dir(), "Checkpoint was not downloaded" shutil.move(file_path, checkpoint_path.parent / file_path.name) archive_path = checkpoint_path.parent / file_path.name unzip(checkpoint_path, archive_path)
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/trainer/callbacks/callbacks
callbacks
save_checkpoint
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. _target_: callbacks.ctl_callbacks.SaveCheckpoint
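This file is a Hydra callback config: _target_ names the class to construct. Below is a minimal sketch of how such an entry is typically resolved; hydra.utils.instantiate is standard Hydra, while an importable callbacks.ctl_callbacks module and a no-argument SaveCheckpoint constructor are assumptions taken from the config itself.
# Sketch: resolving the _target_ entry above with Hydra (assumes callbacks.ctl_callbacks is importable).
from omegaconf import OmegaConf
from hydra.utils import instantiate

cfg = OmegaConf.create({"_target_": "callbacks.ctl_callbacks.SaveCheckpoint"})
save_checkpoint_callback = instantiate(cfg)   # imports the module and calls SaveCheckpoint()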
PyTorch/Segmentation/MaskRCNN/pytorch/configs/caffe2
caffe2
e2e_mask_rcnn_R_101_FPN_1x_caffe2
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://Caffe2Detectron/COCO/35861795/e2e_mask_rcnn_R-101-FPN_1x" BACKBONE: CONV_BODY: "R-101-FPN" OUT_CHANNELS: 256 RPN: USE_FPN: True ANCHOR_STRIDE: (4, 8, 16, 32, 64) PRE_NMS_TOP_N_TRAIN: 2000 PRE_NMS_TOP_N_TEST: 1000 POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TEST: 1000 ROI_HEADS: USE_FPN: True ROI_BOX_HEAD: POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" PREDICTOR: "FPNPredictor" ROI_MASK_HEAD: POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" PREDICTOR: "MaskRCNNC4Predictor" POOLER_RESOLUTION: 14 POOLER_SAMPLING_RATIO: 2 RESOLUTION: 28 SHARE_BOX_FEATURE_EXTRACTOR: False MASK_ON: True DATASETS: TEST: ("coco_2014_minival",) DATALOADER: SIZE_DIVISIBILITY: 32
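In the Mask R-CNN repo this YAML is consumed by the training config system, but it can also be inspected as plain YAML; the file name below is an assumption, and note that tuple-looking values such as ANCHOR_STRIDE are parsed by PyYAML as strings.
# Inspecting the config above with plain PyYAML (file name is an assumption).
import yaml

with open("e2e_mask_rcnn_R_101_FPN_1x_caffe2.yaml") as f:
    cfg = yaml.safe_load(f)

print(cfg["MODEL"]["BACKBONE"]["CONV_BODY"])   # 'R-101-FPN'
print(cfg["MODEL"]["RPN"]["ANCHOR_STRIDE"])    # '(4, 8, 16, 32, 64)' -- a string, not a tuple
print(cfg["DATASETS"]["TEST"])                 # '("coco_2014_minival",)'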
TensorFlow/LanguageModeling/BERT/notebooks
notebooks
input
{"data": [ {"title": "Project Apollo", "paragraphs": [ {"context":"The Apollo program, also known as Project Apollo, was the third United States human spaceflight program carried out by the National Aeronautics and Space Administration (NASA), which accomplished landing the first humans on the Moon from 1969 to 1972. First conceived during Dwight D. Eisenhower's administration as a three-man spacecraft to follow the one-man Project Mercury which put the first Americans in space, Apollo was later dedicated to President John F. Kennedy's national goal of landing a man on the Moon and returning him safely to the Earth by the end of the 1960s, which he proposed in a May 25, 1961, address to Congress. Project Mercury was followed by the two-man Project Gemini. The first manned flight of Apollo was in 1968. Apollo ran from 1961 to 1972, and was supported by the two man Gemini program which ran concurrently with it from 1962 to 1966. Gemini missions developed some of the space travel techniques that were necessary for the success of the Apollo missions. Apollo used Saturn family rockets as launch vehicles. Apollo/Saturn vehicles were also used for an Apollo Applications Program, which consisted of Skylab, a space station that supported three manned missions in 1973-74, and the Apollo-Soyuz Test Project, a joint Earth orbit mission with the Soviet Union in 1975.", "qas": [ { "question": "What project put the first Americans into space?", "id": "Q1" }, { "question": "What program was created to carry out these projects and missions?", "id": "Q2" }, { "question": "What year did the first manned Apollo flight occur?", "id": "Q3" }, { "question": "What President is credited with the original notion of putting Americans in space?", "id": "Q4" }, { "question": "Who did the U.S. collaborate with on an Earth orbit mission in 1975?", "id": "Q5" }, { "question": "How long did Project Apollo run?", "id": "Q6" }, { "question": "What program helped develop space travel techniques that Project Apollo used?", "id": "Q7" }, {"question": "What space station supported three manned missions in 1973-1974?", "id": "Q8" } ]}]}]}
Tools/PyTorch/TimeSeriesPredictionPlatform/models/tft_pyt/triton/runner
runner
logger
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import pathlib import coloredlogs class Logger(logging.Logger): def __init__(self, name, level=logging.NOTSET): super().__init__(name, level=level) self._file_path = None def initialize(self, file_path: pathlib.Path): self._file_path = file_path def write(self, log: str): if not self._file_path: return with open(self._file_path, "+a") as file: file.write(log) LOGGER = Logger("runner") log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(format=log_format) coloredlogs.install( level=logging.INFO, fmt=log_format, logger=LOGGER, field_styles={ "asctime": {"color": "green"}, "hostname": {"color": "magenta"}, "levelname": {"bold": True, "color": "blue"}, "name": {"color": "blue"}, "programname": {"color": "cyan"}, "username": {"color": "yellow"}, }, reconfigure=True, )
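A short sketch of how this runner logger might be used; the log file path is an assumption. Messages sent through the standard logging API go to the colored console handler installed above, while write() appends raw text to the file registered via initialize().
# Illustrative use of the LOGGER defined above (log path is an assumption).
import pathlib

LOGGER.initialize(file_path=pathlib.Path("runner.log"))
LOGGER.info("starting runner stage")                 # colored console output
LOGGER.write("raw command output captured here\n")   # appended to runner.log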
TensorFlow/LanguageModeling/BERT/data
data
bertPrep
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import BookscorpusTextFormatting import Downloader import TextSharding import WikicorpusTextFormatting import PubMedTextFormatting import argparse import itertools import multiprocessing import os import pprint import subprocess def main(args): working_dir = os.environ['BERT_PREP_WORKING_DIR'] print('Working Directory:', working_dir) print('Action:', args.action) print('Dataset Name:', args.dataset) if args.input_files: args.input_files = args.input_files.split(',') hdf5_tfrecord_folder_prefix = "/lower_case_" + str(args.do_lower_case) + "_seq_len_" + str(args.max_seq_length) \ + "_max_pred_" + str(args.max_predictions_per_seq) + "_masked_lm_prob_" + str(args.masked_lm_prob) \ + "_random_seed_" + str(args.random_seed) + "_dupe_factor_" + str(args.dupe_factor) \ + "_shard_" + str(args.n_training_shards) + "_test_split_" + str(int(args.fraction_test_set * 100)) directory_structure = { 'download' : working_dir + '/download', # Downloaded and decompressed 'extracted' : working_dir +'/extracted', # Extracted from whatever the initial format is (e.g., wikiextractor) 'formatted' : working_dir + '/formatted_one_article_per_line', # This is the level where all sources should look the same 'sharded' : working_dir + '/sharded', 'tfrecord' : working_dir + '/tfrecord' + hdf5_tfrecord_folder_prefix, 'hdf5': working_dir + '/hdf5'+ hdf5_tfrecord_folder_prefix, } print('\nDirectory Structure:') pp = pprint.PrettyPrinter(indent=2) pp.pprint(directory_structure) print('') if args.action == 'download': if not os.path.exists(directory_structure['download']): os.makedirs(directory_structure['download']) downloader = Downloader.Downloader(args.dataset, directory_structure['download']) downloader.download() elif args.action == 'text_formatting': assert args.dataset != 'google_pretrained_weights' and args.dataset != 'nvidia_pretrained_weights' \ and args.dataset != 'squad' and args.dataset != 'mrpc' and args.dataset != 'cola' and \ args.dataset != 'mnli' and args.dataset != 'sst-2', 'Cannot perform text_formatting on pretrained weights' if not os.path.exists(directory_structure['extracted']): os.makedirs(directory_structure['extracted']) if not os.path.exists(directory_structure['formatted']): os.makedirs(directory_structure['formatted']) if args.dataset == 'bookscorpus': books_path = directory_structure['download'] + '/bookscorpus' #books_path = directory_structure['download'] output_filename = directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt' books_formatter = BookscorpusTextFormatting.BookscorpusTextFormatting(books_path, output_filename, recursive=True) books_formatter.merge() elif args.dataset == 'wikicorpus_en': if args.skip_wikiextractor == 0: path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py' wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_en.xml ' + '-b 100M --processes ' + 
str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset print('WikiExtractor Command:', wikiextractor_command) wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True) wiki_path = directory_structure['extracted'] + '/wikicorpus_en' output_filename = directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt' wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True) wiki_formatter.merge() elif args.dataset == 'wikicorpus_zh': assert False, 'wikicorpus_zh not fully supported at this time. The simplified/tradition Chinese data needs to be translated and properly segmented still, and should work once this step is added.' if args.skip_wikiextractor == 0: path_to_wikiextractor_in_container = '/workspace/wikiextractor/WikiExtractor.py' wikiextractor_command = path_to_wikiextractor_in_container + ' ' + directory_structure['download'] + '/' + args.dataset + '/wikicorpus_zh.xml ' + '-b 100M --processes ' + str(args.n_processes) + ' -o ' + directory_structure['extracted'] + '/' + args.dataset print('WikiExtractor Command:', wikiextractor_command) wikiextractor_process = subprocess.run(wikiextractor_command, shell=True, check=True) wiki_path = directory_structure['extracted'] + '/wikicorpus_zh' output_filename = directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt' wiki_formatter = WikicorpusTextFormatting.WikicorpusTextFormatting(wiki_path, output_filename, recursive=True) wiki_formatter.merge() elif args.dataset == 'pubmed_baseline': pubmed_path = directory_structure['download'] + '/pubmed' + '/baseline' output_filename = directory_structure['formatted'] + '/pubmed_baseline_one_article_per_line.txt' pubmed_formatter = PubMedTextFormatting.PubMedTextFormatting(pubmed_path, output_filename, recursive=True) pubmed_formatter.merge() elif args.action == 'sharding': # Note: books+wiki requires user to provide list of input_files (comma-separated with no spaces) if args.dataset == 'bookscorpus' or 'wikicorpus' in args.dataset or 'books_wiki' in args.dataset or 'pubmed' in args.dataset: if args.input_files is None: if args.dataset == 'bookscorpus': args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt'] elif args.dataset == 'wikicorpus_en': args.input_files = [directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt'] elif args.dataset == 'wikicorpus_zh': args.input_files = [directory_structure['formatted'] + '/wikicorpus_zh_one_article_per_line.txt'] elif args.dataset == 'books_wiki_en_corpus': args.input_files = [directory_structure['formatted'] + '/bookscorpus_one_book_per_line.txt', directory_structure['formatted'] + '/wikicorpus_en_one_article_per_line.txt'] elif args.dataset == 'pubmed_baseline': args.input_files = [directory_structure['formatted'] + '/pubmed_baseline_one_article_per_line.txt'] output_file_prefix = directory_structure['sharded'] + '/' + args.dataset + '/' + args.dataset if not os.path.exists(directory_structure['sharded']): os.makedirs(directory_structure['sharded']) if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset): os.makedirs(directory_structure['sharded'] + '/' + args.dataset) if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset + '/training'): os.makedirs(directory_structure['sharded'] + '/' + args.dataset + '/training') if not os.path.exists(directory_structure['sharded'] + '/' + args.dataset + '/test'): 
os.makedirs(directory_structure['sharded'] + '/' + args.dataset + '/test') # Segmentation is here because all datasets look the same in one article/book/whatever per line format, and # it seemed unnecessarily complicated to add an additional preprocessing step to call just for this. # Different languages (e.g., Chinese simplified/traditional) may require translation and # other packages to be called from here -- just add a conditional branch for those extra steps segmenter = TextSharding.NLTKSegmenter() sharding = TextSharding.Sharding(args.input_files, output_file_prefix, args.n_training_shards, args.n_test_shards, args.fraction_test_set) sharding.load_articles() sharding.segment_articles_into_sentences(segmenter) sharding.distribute_articles_over_shards() sharding.write_shards_to_disk() else: assert False, 'Unsupported dataset for sharding' elif args.action == 'create_tfrecord_files': if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset): os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset) if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset + '/training'): os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset + '/training') if not os.path.exists(directory_structure['tfrecord'] + "/" + args.dataset + '/test'): os.makedirs(directory_structure['tfrecord'] + "/" + args.dataset + '/test') last_process = None def create_record_worker(filename_prefix, shard_id, output_format='tfrecord', split='training'): bert_preprocessing_command = 'python /workspace/bert/utils/create_pretraining_data.py' bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + split + '/' + filename_prefix + '_' + str(shard_id) + '.txt' bert_preprocessing_command += ' --output_file=' + directory_structure['tfrecord'] + '/' + args.dataset + '/' + split + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format bert_preprocessing_command += ' --vocab_file=' + args.vocab_file bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else '' bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length) bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq) bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob) bert_preprocessing_command += ' --random_seed=' + str(args.random_seed) bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor) bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True) last_process = bert_preprocessing_process # This could be better optimized (fine if all take equal time) if shard_id % args.n_processes == 0 and shard_id > 0: bert_preprocessing_process.wait() return last_process output_file_prefix = args.dataset for i in range(args.n_training_shards): last_process = create_record_worker(output_file_prefix + '_training', i, 'tfrecord', 'training') last_process.wait() for i in range(args.n_test_shards): last_process = create_record_worker(output_file_prefix + '_test', i, 'tfrecord', 'test') last_process.wait() elif args.action == 'create_hdf5_files': assert False, 'HDF5 format not fully supported in this release.' 
if not os.path.exists(directory_structure['hdf5'] + "/" + args.dataset): os.makedirs(directory_structure['hdf5'] + "/" + args.dataset) last_process = None def create_record_worker(filename_prefix, shard_id, output_format='hdf5'): bert_preprocessing_command = 'python /workspace/bert/utils/create_pretraining_data.py' bert_preprocessing_command += ' --input_file=' + directory_structure['sharded'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.txt' bert_preprocessing_command += ' --output_file=' + directory_structure['hdf5'] + '/' + args.dataset + '/' + filename_prefix + '_' + str(shard_id) + '.' + output_format bert_preprocessing_command += ' --vocab_file=' + args.vocab_file bert_preprocessing_command += ' --do_lower_case' if args.do_lower_case else '' bert_preprocessing_command += ' --max_seq_length=' + str(args.max_seq_length) bert_preprocessing_command += ' --max_predictions_per_seq=' + str(args.max_predictions_per_seq) bert_preprocessing_command += ' --masked_lm_prob=' + str(args.masked_lm_prob) bert_preprocessing_command += ' --random_seed=' + str(args.random_seed) bert_preprocessing_command += ' --dupe_factor=' + str(args.dupe_factor) bert_preprocessing_process = subprocess.Popen(bert_preprocessing_command, shell=True) last_process = bert_preprocessing_process # This could be better optimized (fine if all take equal time) if shard_id % args.n_processes == 0 and shard_id > 0: bert_preprocessing_process.wait() return last_process output_file_prefix = args.dataset for i in range(args.n_training_shards): last_process = create_record_worker(output_file_prefix + '_training', i) last_process.wait() for i in range(args.n_test_shards): last_process = create_record_worker(output_file_prefix + '_test', i) last_process.wait()
if __name__ == "__main__": parser = argparse.ArgumentParser( description='Preprocessing Application for Everything BERT-related' ) parser.add_argument( '--action', type=str, help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords', choices={ 'download', # Download and verify md5/sha sums 'text_formatting', # Convert into a file that contains one article/book per line 'sharding', # Convert previous formatted text into shards containing one sentence per line 'create_tfrecord_files', # Turn each shard into a TFRecord with masking and next sentence prediction info 'create_hdf5_files' # Turn each shard into an HDF5 file with masking and next sentence prediction info } ) parser.add_argument( '--dataset', type=str, help='Specify the dataset to perform --action on', choices={ 'bookscorpus', 'wikicorpus_en', 'wikicorpus_zh', 'books_wiki_en_corpus', 'pubmed_baseline', 'pubmed_daily_update', 'pubmed_fulltext', 'pubmed_open_access', 'google_pretrained_weights', 'nvidia_pretrained_weights', 'squad', 'mrpc', 'sst-2', 'mnli', 'cola', 'all' } ) parser.add_argument( '--input_files', type=str, help='Specify the input files in a comma-separated list (no spaces)' ) parser.add_argument( '--n_training_shards', type=int, help='Specify the number of training shards to generate', default=1472 ) parser.add_argument( '--n_test_shards', type=int, help='Specify the number of test shards to generate', default=1472 ) parser.add_argument( '--fraction_test_set', type=float, help='Specify the fraction (0..1) of the data to withhold for the test data split (based on number of sequences)', default=0.1 ) parser.add_argument( '--segmentation_method', type=str, help='Specify your choice of sentence segmentation', choices={ 'nltk' }, default='nltk' ) parser.add_argument( '--n_processes', type=int, help='Specify the max number of processes to allow at one time', default=4 ) parser.add_argument( '--random_seed', type=int, help='Specify the base seed to use for any random number generation', default=12345 ) parser.add_argument( '--dupe_factor', type=int, help='Specify the duplication factor', default=5 ) parser.add_argument( '--masked_lm_prob', type=float, help='Specify the probability for masked lm', default=0.15 ) parser.add_argument( '--max_seq_length', type=int, help='Specify the maximum sequence length', default=512 ) parser.add_argument( '--max_predictions_per_seq', type=int, help='Specify the maximum number of masked words per sequence', default=20 ) parser.add_argument( '--do_lower_case', type=int, help='Specify whether it is cased (0) or uncased (1) (any number greater than 0 will be treated as uncased)', default=1 ) parser.add_argument( '--vocab_file', type=str, help='Specify absolute path to vocab file to use' ) parser.add_argument( '--skip_wikiextractor', type=int, help='Specify whether to skip wikiextractor step 0=False, 1=True', default=0 ) parser.add_argument( '--interactive_json_config_generator', type=str, help='Specify the action you want the app to take. e.g., generate vocab, segment, create tfrecords' ) args = parser.parse_args() main(args)
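One plausible way to drive the script above end to end is shown below; the working directory, script path, and shard counts are assumptions, while the flag names and the BERT_PREP_WORKING_DIR environment variable come from the code itself.
# Hypothetical invocation of bertPrep.py (paths and shard counts are assumptions).
import os
import subprocess

env = dict(os.environ, BERT_PREP_WORKING_DIR="/workspace/bert/data")
subprocess.run(
    ["python", "bertPrep.py",
     "--action", "sharding",
     "--dataset", "wikicorpus_en",
     "--n_training_shards", "256",
     "--n_test_shards", "256"],
    env=env,
    check=True,
)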
TensorFlow2/Recommendation/DLRM_and_DCNv2/tests/feature_specs
feature_specs
different_feature_names
channel_spec: categorical: - 65ytfg.bin - dgtwrg.bin - hmfgd.bin - 6tyjgh.bin - 67yu.bin - l6rtd.bin - ouikjhfg.bin - 65ry.bin - 5yhtrfg.bin - 65rty.bin - 34ywesh5rtg.bin - w4su6js.bin - 45wyhtr.bin - u65rhty.bin - tujy.bin - tyjdh.bin - ujtyesh.bin - 5e7tdyj.bin - 46rjydh.bin - 8kiujynrht.bin - fsgh.bin - 34eyr.bin - we5etydj.bin - fsghfsdgh.bin - hrthshs.bin - tujyhfg.bin label: - qwer numerical: &id001 - gadsfgsdfg - 5yrthf - 45ryhtf - u5j6yrhtfd - u5rtg3qq - j65ee5he5 - yhe5h - 4y5e6ru - 5yfwerf - g53g6y635 - 42c524 - bge5v6gve5 - jhw5rf feature_spec: 65ytfg.bin: cardinality: 100000 dtype: int32 dgtwrg.bin: cardinality: 100000 dtype: int32 34ywesh5rtg.bin: cardinality: 100000 dtype: int32 w4su6js.bin: cardinality: 100000 dtype: int32 45wyhtr.bin: cardinality: 100000 dtype: int32 u65rhty.bin: cardinality: 100000 dtype: int32 tujy.bin: cardinality: 100000 dtype: int32 tyjdh.bin: cardinality: 100000 dtype: int32 ujtyesh.bin: cardinality: 100000 dtype: int32 5e7tdyj.bin: cardinality: 100000 dtype: int32 46rjydh.bin: cardinality: 100000 dtype: int32 8kiujynrht.bin: cardinality: 100000 dtype: int32 hmfgd.bin: cardinality: 100000 dtype: int32 fsgh.bin: cardinality: 100000 dtype: int32 34eyr.bin: cardinality: 100000 dtype: int32 we5etydj.bin: cardinality: 100000 dtype: int32 fsghfsdgh.bin: cardinality: 100000 dtype: int32 hrthshs.bin: cardinality: 100000 dtype: int32 tujyhfg.bin: cardinality: 100000 dtype: int32 6tyjgh.bin: cardinality: 100000 dtype: int32 67yu.bin: cardinality: 100000 dtype: int32 l6rtd.bin: cardinality: 100000 dtype: int32 ouikjhfg.bin: cardinality: 100000 dtype: int32 65ry.bin: cardinality: 100000 dtype: int32 5yhtrfg.bin: cardinality: 100000 dtype: int32 65rty.bin: cardinality: 100000 dtype: int32 qwer: dtype: bool gadsfgsdfg: dtype: float16 5yrthf: dtype: float16 42c524: dtype: float16 bge5v6gve5: dtype: float16 jhw5rf: dtype: float16 45ryhtf: dtype: float16 u5j6yrhtfd: dtype: float16 u5rtg3qq: dtype: float16 j65ee5he5: dtype: float16 yhe5h: dtype: float16 4y5e6ru: dtype: float16 5yfwerf: dtype: float16 g53g6y635: dtype: float16 metadata: {} source_spec: test: - features: *id001 files: - test/numerical.bin type: split_binary - features: - qwer files: - test/label.bin type: split_binary - features: - 65ytfg.bin files: - test/65ytfg.bin type: split_binary - features: - dgtwrg.bin files: - test/dgtwrg.bin type: split_binary - features: - hmfgd.bin files: - test/hmfgd.bin type: split_binary - features: - 6tyjgh.bin files: - test/6tyjgh.bin type: split_binary - features: - 67yu.bin files: - test/67yu.bin type: split_binary - features: - l6rtd.bin files: - test/l6rtd.bin type: split_binary - features: - ouikjhfg.bin files: - test/ouikjhfg.bin type: split_binary - features: - 65ry.bin files: - test/65ry.bin type: split_binary - features: - 5yhtrfg.bin files: - test/5yhtrfg.bin type: split_binary - features: - 65rty.bin files: - test/65rty.bin type: split_binary - features: - 34ywesh5rtg.bin files: - test/34ywesh5rtg.bin type: split_binary - features: - w4su6js.bin files: - test/w4su6js.bin type: split_binary - features: - 45wyhtr.bin files: - test/45wyhtr.bin type: split_binary - features: - u65rhty.bin files: - test/u65rhty.bin type: split_binary - features: - tujy.bin files: - test/tujy.bin type: split_binary - features: - tyjdh.bin files: - test/tyjdh.bin type: split_binary - features: - ujtyesh.bin files: - test/ujtyesh.bin type: split_binary - features: - 5e7tdyj.bin files: - test/5e7tdyj.bin type: split_binary - features: - 46rjydh.bin files: - test/46rjydh.bin type: 
split_binary - features: - 8kiujynrht.bin files: - test/8kiujynrht.bin type: split_binary - features: - fsgh.bin files: - test/fsgh.bin type: split_binary - features: - 34eyr.bin files: - test/34eyr.bin type: split_binary - features: - we5etydj.bin files: - test/we5etydj.bin type: split_binary - features: - fsghfsdgh.bin files: - test/fsghfsdgh.bin type: split_binary - features: - hrthshs.bin files: - test/hrthshs.bin type: split_binary - features: - tujyhfg.bin files: - test/tujyhfg.bin type: split_binary train: - features: *id001 files: - train/numerical.bin type: split_binary - features: - qwer files: - train/label.bin type: split_binary - features: - 65ytfg.bin files: - train/65ytfg.bin type: split_binary - features: - dgtwrg.bin files: - train/dgtwrg.bin type: split_binary - features: - hmfgd.bin files: - train/hmfgd.bin type: split_binary - features: - 6tyjgh.bin files: - train/6tyjgh.bin type: split_binary - features: - 67yu.bin files: - train/67yu.bin type: split_binary - features: - l6rtd.bin files: - train/l6rtd.bin type: split_binary - features: - ouikjhfg.bin files: - train/ouikjhfg.bin type: split_binary - features: - 65ry.bin files: - train/65ry.bin type: split_binary - features: - 5yhtrfg.bin files: - train/5yhtrfg.bin type: split_binary - features: - 65rty.bin files: - train/65rty.bin type: split_binary - features: - 34ywesh5rtg.bin files: - train/34ywesh5rtg.bin type: split_binary - features: - w4su6js.bin files: - train/w4su6js.bin type: split_binary - features: - 45wyhtr.bin files: - train/45wyhtr.bin type: split_binary - features: - u65rhty.bin files: - train/u65rhty.bin type: split_binary - features: - tujy.bin files: - train/tujy.bin type: split_binary - features: - tyjdh.bin files: - train/tyjdh.bin type: split_binary - features: - ujtyesh.bin files: - train/ujtyesh.bin type: split_binary - features: - 5e7tdyj.bin files: - train/5e7tdyj.bin type: split_binary - features: - 46rjydh.bin files: - train/46rjydh.bin type: split_binary - features: - 8kiujynrht.bin files: - train/8kiujynrht.bin type: split_binary - features: - fsgh.bin files: - train/fsgh.bin type: split_binary - features: - 34eyr.bin files: - train/34eyr.bin type: split_binary - features: - we5etydj.bin files: - train/we5etydj.bin type: split_binary - features: - fsghfsdgh.bin files: - train/fsghfsdgh.bin type: split_binary - features: - hrthshs.bin files: - train/hrthshs.bin type: split_binary - features: - tujyhfg.bin files: - train/tujyhfg.bin type: split_binary
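The feature spec above is plain YAML (the *id001 alias simply reuses the numerical feature list), so it can be sanity-checked with PyYAML; the feature_spec.yaml file name is an assumption.
# Sanity-checking the feature spec above with PyYAML (file name is an assumption).
import yaml

with open("feature_spec.yaml") as f:
    spec = yaml.safe_load(f)

categorical = spec["channel_spec"]["categorical"]
numerical = spec["channel_spec"]["numerical"]
cardinalities = {name: spec["feature_spec"][name]["cardinality"] for name in categorical}

print(len(categorical), "categorical features,", len(numerical), "numerical features")
print("label channel:", spec["channel_spec"]["label"])
print("distinct cardinalities:", set(cardinalities.values()))   # {100000} in this synthetic spec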