Dataset columns: relative_path, section, filename, text.
PyTorch/SpeechSynthesis/FastPitch/triton
triton
run_online_performance_test_on_triton
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ], where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ] `--shape IMAGE:3,224,224`. """ import argparse import csv import os import sys from pathlib import Path from typing import List, Optional # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.report import save_results, show_results, sort_results from .deployment_toolkit.warmup import warmup def calculate_average_latency(r): avg_sum_fields = [ "Client Send", "Network+Server Send/Recv", "Server Queue", "Server Compute", "Server Compute Input", "Server Compute Infer", "Server Compute Output", "Client Recv", ] avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields]) return avg_latency def update_performance_data(results: List, performance_file: str): with open(performance_file, "r") as csvfile: reader = csv.DictReader(csvfile) for row in reader: row["avg latency"] = calculate_average_latency(row) results.append(row) def _parse_batch_sizes(batch_sizes: str): batches = batch_sizes.split(sep=",") return list(map(lambda x: int(x.strip()), batches)) def online_performance( model_name: str, batch_sizes: List[int], result_path: str, input_shapes: Optional[List[str]] = None, profiling_data: str = "random", triton_instances: int = 1, triton_gpu_engine_count: int = 1, server_url: str = "localhost", measurement_window: int = 10000, shared_memory: bool = False ): print("\n") print(f"==== Dynamic batching analysis start ====") print("\n") input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else "" print(f"Running performance tests for dynamic batching") performance_file = f"triton_performance_dynamic_partial.csv" max_batch_size = max(batch_sizes) max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count max_concurrency = min(256, max_total_requests) batch_size = max(1, max_total_requests // 256) step = max(1, max_concurrency // 32) min_concurrency = step exec_args = f"""-m {model_name} \ -x 1 \ -p {measurement_window} \ -v \ -i http \ -u {server_url}:8000 \ -b {batch_size} \ -f {performance_file} \ --concurrency-range {min_concurrency}:{max_concurrency}:{step} \ --input-data {profiling_data} {input_shapes}""" if shared_memory: exec_args += " --shared-memory=cuda" result = os.system(f"perf_client {exec_args}") if result != 0: print(f"Failed running performance tests. 
Perf client failed with exit code {result}") sys.exit(1) results = list() update_performance_data(results=results, performance_file=performance_file) results = sort_results(results=results) save_results(filename=result_path, data=results) show_results(results=results) os.remove(performance_file) print("Performance results for dynamic batching stored in: {0}".format(result_path)) print("\n") print(f"==== Analysis done ====") print("\n") def main(): parser = argparse.ArgumentParser() parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test") parser.add_argument( "--input-data", type=str, required=False, default="random", help="Input data to perform profiling." ) parser.add_argument( "--input-shape", action="append", required=False, help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.", ) parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.") parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances") parser.add_argument( "--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server" ) parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.") parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server") parser.add_argument( "--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000 ) parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true", default=False) args = parser.parse_args() warmup( server_url=args.server_url, model_name=args.model_name, batch_sizes=_parse_batch_sizes(args.batch_sizes), triton_instances=args.triton_instances, triton_gpu_engine_count=args.number_of_model_instances, profiling_data=args.input_data, input_shapes=args.input_shape, measurement_window=args.measurement_window, shared_memory=args.shared_memory ) online_performance( server_url=args.server_url, model_name=args.model_name, batch_sizes=_parse_batch_sizes(args.batch_sizes), triton_instances=args.triton_instances, triton_gpu_engine_count=args.number_of_model_instances, profiling_data=args.input_data, input_shapes=args.input_shape, result_path=args.result_path, measurement_window=args.measurement_window, shared_memory=args.shared_memory ) if __name__ == "__main__": main()
TensorFlow2/Classification/ConvNets/model
model
efficientnet_model_v1
# Lint as: python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains definitions for EfficientNet v1 model. [1] Mingxing Tan, Quoc V. Le EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks. ICML'19, https://arxiv.org/abs/1905.11946 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import os from typing import Any, Dict, Optional, List, Text, Tuple import copy import tensorflow as tf import tensorflow.keras.backend as K import horovod.tensorflow as hvd from utils.optimizer_factory import GradientAccumulator from model.layers import simple_swish, hard_swish, identity, gelu, get_activation from model.blocks import conv2d_block, mb_conv_block from model.common_modules import round_filters, round_repeats, load_weights from dataloader import preprocessing from dataloader.dataset_factory import mixing_lite DENSE_KERNEL_INITIALIZER = { 'class_name': 'VarianceScaling', 'config': { 'scale': 1 / 3.0, 'mode': 'fan_in', 'distribution': 'uniform' } } @tf.keras.utils.register_keras_serializable(package='Vision') class Model(tf.keras.Model): """Wrapper class for an EfficientNet v1 Keras model. Contains helper methods to build, manage, and save metadata about the model. """ def __init__(self, config: Dict[Text, Any] = None): """Create an EfficientNet v1 model. Args: config: (optional) the main model parameters to create the model overrides: (optional) a dict containing keys that can override config """ super().__init__() self.config = config if self.config.grad_accum_steps > 1: self.grad_accumulator = GradientAccumulator() self.gradients_gnorm = tf.Variable(0, trainable=False, dtype=tf.float32) self.local_step = tf.Variable(initial_value=0, dtype=tf.int64, trainable=False, aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA) input_channels = config.mparams.input_channels # Consistent with channels last format. will be permuted in _build, if channels first requested. 
input_shape = (None, None, input_channels) # Should handle any image size image_input = tf.keras.layers.Input(shape=input_shape) is_training ="predict" not in config.mode if is_training: mixup_input = tf.keras.layers.Input(shape=(1, 1, 1)) cutmix_input = tf.keras.layers.Input(shape=(None, None, 1)) is_tr_split = tf.keras.layers.Input(shape=(1)) # indicates whether we use tr or eval data loader inputs = [image_input,mixup_input,cutmix_input,is_tr_split] else: inputs = [image_input] output = self._build(inputs) # Cast to float32 in case we have a different model dtype output = tf.cast(output, tf.float32) # defining a Model object within another Model object is not the best design idea, # but I wanted to make use of existing functional API code from Subhankar self.model = tf.keras.Model(inputs=inputs,outputs=output) def call(self,data): is_predict ="predict" in self.config.mode if not is_predict: x=data['image'] mixup_weights = data['mixup_weight'] cutmix_masks = data['cutmix_mask'] is_tr_split = data['is_tr_split'] return self.model([x,mixup_weights,cutmix_masks,is_tr_split]) else: return self.model([data]) def _build(self, input: List[tf.keras.layers.Input]): """Creates an EfficientNet v1 graph given the model parameters. This function is wrapped by the `EfficientNet_v1` class to make a tf.keras.Model. Args: image_input: the input batch of images Returns: the output of efficientnet v1 """ config = self.config depth_coefficient = config.mparams.depth_coefficient blocks = config.mparams.blocks stem_base_filters = config.mparams.stem_base_filters top_base_filters = config.mparams.top_base_filters activation = get_activation(config.mparams.activation) dropout_rate = config.mparams.dropout_rate drop_connect_rate = config.mparams.drop_connect_rate num_classes = config.mparams.num_classes input_channels = config.mparams.input_channels rescale_input = config.mparams.rescale_input data_format = tf.keras.backend.image_data_format() dtype = config.mparams.dtype weight_decay = config.weight_decay weight_init = config.mparams.weight_init train_batch_size = config.train_batch_size do_mixup = config.mixup_alpha > 0 do_cutmix = config.cutmix_alpha > 0 def cond_mixing(args): images,mixup_weights,cutmix_masks,is_tr_split = args return tf.cond(tf.keras.backend.equal(is_tr_split[0],0), lambda: images, # eval phase lambda: mixing_lite(images,mixup_weights,cutmix_masks, train_batch_size, do_mixup, do_cutmix)) # tr phase images = input[0] x = images if len(input) > 1: # we get here only during train or train_and_eval modes if self.config.defer_img_mixing: # we get here only if we chose not to perform image mixing in the data loader # image mixing on device further accelrates training mixup_weights = input[1] cutmix_masks = input[2] is_tr_split = input[3] x = tf.keras.layers.Lambda(cond_mixing)([images,mixup_weights,cutmix_masks,is_tr_split]) # data loader outputs data in the channels last format if data_format == 'channels_first': # Happens on GPU/TPU if available. 
x = tf.keras.layers.Permute((3, 1, 2))(x) if rescale_input: # x-mean/std x = preprocessing.normalize_images(x, mean_rgb=config.mparams.mean_rgb, stddev_rgb=config.mparams.std_rgb, num_channels=input_channels, dtype=dtype, data_format=data_format) # Build stem x = conv2d_block(x, round_filters(stem_base_filters, config), config, kernel_size=[3, 3], strides=[2, 2], activation=activation, name='stem') # Build blocks num_blocks_total = sum( round_repeats(block['num_repeat'], depth_coefficient) for block in blocks) block_num = 0 for stack_idx, block in enumerate(blocks): assert block['num_repeat'] > 0 # Update block input and output filters based on depth multiplier block.update({ 'input_filters':round_filters(block['input_filters'], config), 'output_filters':round_filters(block['output_filters'], config), 'num_repeat':round_repeats(block['num_repeat'], depth_coefficient)}) # The first block needs to take care of stride and filter size increase drop_rate = drop_connect_rate * float(block_num) / num_blocks_total config.mparams.update({'drop_connect_rate': drop_rate}) # TODO(Sugh) replace block_prefix = 'stack_{}/block_0/'.format(stack_idx) x = mb_conv_block(x, block, config, block_prefix) block_num += 1 if block['num_repeat'] > 1: block.update({ 'input_filters':block['output_filters'], 'strides':(1, 1) }) for block_idx in range(block['num_repeat'] - 1): drop_rate = drop_connect_rate * float(block_num) / num_blocks_total config.mparams.update({'drop_connect_rate': drop_rate}) block_prefix = 'stack_{}/block_{}/'.format(stack_idx, block_idx + 1) x = mb_conv_block(x, block, config, prefix=block_prefix) block_num += 1 # Build top x = conv2d_block(x, round_filters(top_base_filters, config), config, activation=activation, name='top') # Build classifier DENSE_KERNEL_INITIALIZER['config']['mode'] = weight_init x = tf.keras.layers.GlobalAveragePooling2D(name='top_pool')(x) if dropout_rate and dropout_rate > 0: x = tf.keras.layers.Dropout(dropout_rate, name='top_dropout')(x) x = tf.keras.layers.Dense( num_classes, kernel_initializer=DENSE_KERNEL_INITIALIZER, kernel_regularizer=tf.keras.regularizers.l2(weight_decay), bias_regularizer=tf.keras.regularizers.l2(weight_decay), name='logits')(x) x = tf.keras.layers.Activation('softmax', name='probs', dtype=tf.float32)(x) return x
PyTorch/SpeechSynthesis/FastPitch/common
common
tb_dllogger
import atexit import glob import re from itertools import product from pathlib import Path import dllogger import torch import numpy as np from dllogger import StdOutBackend, JSONStreamBackend, Verbosity from torch.utils.tensorboard import SummaryWriter tb_loggers = {} class TBLogger: """ xyz_dummies: stretch the screen with empty plots so the legend would always fit for other plots """ def __init__(self, enabled, log_dir, name, interval=1, dummies=True): self.enabled = enabled self.interval = interval self.cache = {} if self.enabled: self.summary_writer = SummaryWriter( log_dir=Path(log_dir, name), flush_secs=120, max_queue=200) atexit.register(self.summary_writer.close) if dummies: for key in ('_', '✕'): self.summary_writer.add_scalar(key, 0.0, 1) def log(self, step, data): for k, v in data.items(): self.log_value(step, k, v.item() if type(v) is torch.Tensor else v) def log_value(self, step, key, val, stat='mean'): if self.enabled: if key not in self.cache: self.cache[key] = [] self.cache[key].append(val) if len(self.cache[key]) == self.interval: agg_val = getattr(np, stat)(self.cache[key]) self.summary_writer.add_scalar(key, agg_val, step) del self.cache[key] def log_grads(self, step, model): if self.enabled: norms = [p.grad.norm().item() for p in model.parameters() if p.grad is not None] for stat in ('max', 'min', 'mean'): self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms), stat=stat) def unique_log_fpath(fpath): """Have a unique log filename for every separate run""" log_num = max([0] + [int(re.search("\.(\d+)", Path(f).suffix).group(1)) for f in glob.glob(f"{fpath}.*")]) return f"{fpath}.{log_num + 1}" def stdout_step_format(step): if isinstance(step, str): return step fields = [] if len(step) > 0: fields.append("epoch {:>4}".format(step[0])) if len(step) > 1: fields.append("iter {:>3}".format(step[1])) if len(step) > 2: fields[-1] += "/{}".format(step[2]) return " | ".join(fields) def stdout_metric_format(metric, metadata, value): name = metadata.get("name", metric + " : ") unit = metadata.get("unit", None) format = f'{{{metadata.get("format", "")}}}' fields = [name, format.format(value) if value is not None else value, unit] fields = [f for f in fields if f is not None] return "| " + " ".join(fields) def init(log_fpath, log_dir, enabled=True, tb_subsets=[], **tb_kw): if enabled: backends = [ JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True), JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)), StdOutBackend(Verbosity.VERBOSE, step_format=stdout_step_format, metric_format=stdout_metric_format) ] else: backends = [] dllogger.init(backends=backends) dllogger.metadata("train_lrate", {"name": "lrate", "unit": None, "format": ":>3.2e"}) for id_, pref in [('train', ''), ('train_avg', 'avg train '), ('val', ' avg val '), ('val_ema', ' EMA val ')]: dllogger.metadata(f"{id_}_loss", {"name": f"{pref}loss", "unit": None, "format": ":>5.2f"}) dllogger.metadata(f"{id_}_mel_loss", {"name": f"{pref}mel loss", "unit": None, "format": ":>5.2f"}) dllogger.metadata(f"{id_}_kl_loss", {"name": f"{pref}kl loss", "unit": None, "format": ":>5.5f"}) dllogger.metadata(f"{id_}_kl_weight", {"name": f"{pref}kl weight", "unit": None, "format": ":>5.5f"}) dllogger.metadata(f"{id_}_frames/s", {"name": None, "unit": "frames/s", "format": ":>10.2f"}) dllogger.metadata(f"{id_}_took", {"name": "took", "unit": "s", "format": ":>3.2f"}) global tb_loggers tb_loggers = {s: TBLogger(enabled, log_dir, name=s, **tb_kw) for s in tb_subsets} def init_inference_metadata(batch_size=None): 
modalities = [('latency', 's', ':>10.5f'), ('RTF', 'x', ':>10.2f'), ('frames/s', 'frames/s', ':>10.2f'), ('samples/s', 'samples/s', ':>10.2f'), ('letters/s', 'letters/s', ':>10.2f'), ('tokens/s', 'tokens/s', ':>10.2f')] if batch_size is not None: modalities.append((f'RTF@{batch_size}', 'x', ':>10.2f')) percs = ['', 'avg', '90%', '95%', '99%'] models = ['', 'fastpitch', 'waveglow', 'hifigan'] for perc, model, (mod, unit, fmt) in product(percs, models, modalities): name = f'{perc} {model} {mod}'.strip().replace(' ', ' ') dllogger.metadata(name.replace(' ', '_'), {'name': f'{name: <26}', 'unit': unit, 'format': fmt}) def log(step, tb_total_steps=None, data={}, subset='train'): if tb_total_steps is not None: tb_loggers[subset].log(tb_total_steps, data) if subset != '': data = {f'{subset}_{key}': v for key, v in data.items()} dllogger.log(step, data=data) def log_grads_tb(tb_total_steps, grads, tb_subset='train'): tb_loggers[tb_subset].log_grads(tb_total_steps, grads) def parameters(data, verbosity=0, tb_subset=None): for k, v in data.items(): dllogger.log(step="PARAMETER", data={k: v}, verbosity=verbosity) if tb_subset is not None and tb_loggers[tb_subset].enabled: tb_data = {k: v for k, v in data.items() if type(v) in (str, bool, int, float)} tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {}) def flush(): dllogger.flush() for tbl in tb_loggers.values(): if tbl.enabled: tbl.summary_writer.flush()
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2ModulationRemovalPlugin
taco2ModulationRemovalPlugin
taco2ModulationRemovalKernel
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "taco2ModulationRemovalKernel.h" #include "taco2Utils.h" #include <algorithm> #include <cassert> #include <cfloat> namespace nvinfer1 { namespace plugin { /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const int WINDOW_SIZE = 1024; } // namespace /****************************************************************************** * KERNELS ******************************************************************** *****************************************************************************/ __global__ void modulationRemovalKernel(const int batchSize, const float* const weightsDevice, const float* const inputDevice, float* const outputDevice, const int inputLength, const int hopLength, const float scale) { // load weights into shared memory __shared__ float localWeights[WINDOW_SIZE]; for (int i = threadIdx.x; i < WINDOW_SIZE; i += blockDim.x) { localWeights[i] = weightsDevice[i]; } __syncthreads(); const int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < inputLength - WINDOW_SIZE) { const int inIdx = idx + (WINDOW_SIZE / 2); // start the window over the first overlap, and slide it until the last // overlap for this point float sum = 0.0f; const int windowOffset = inIdx % hopLength; for (int j = windowOffset; j < WINDOW_SIZE; j += hopLength) { if (inIdx - j >= 0) { sum += localWeights[j]; } } // normal all non-zero values for (int i = 0; i < batchSize; ++i) { float val = inputDevice[inIdx + inputLength * i]; if (sum > FLT_MIN) { val /= sum; } val *= scale; outputDevice[idx + (inputLength - WINDOW_SIZE) * i] = val; } } } /****************************************************************************** * PUBLIC STATIC METHODS ****************************************************** *****************************************************************************/ void 
Taco2ModulationRemovalKernel::compute(const int batchSize, const float* const weightsDevice, const float* const inputDevice, float* const outputDevice, const int inputLength, const int filterLength, const int hopLength, cudaStream_t stream) { assert(filterLength == WINDOW_SIZE); const dim3 grid(taco2::Taco2Utils::roundUpBlocks(inputLength - filterLength, WINDOW_SIZE)); const dim3 block(WINDOW_SIZE); modulationRemovalKernel<<<grid, block, 0, stream>>>(batchSize, weightsDevice, inputDevice, outputDevice, inputLength, hopLength, static_cast<float>(filterLength) / static_cast<float>(hopLength)); } } // namespace plugin } // namespace nvinfer1
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/tacotron2
tacotron2
maskGenerator
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_MASKGENERATOR_H #define TT2I_MASKGENERATOR_H #include "cuda_runtime.h" #include <cstdint> namespace tts { class MaskGenerator { public: /** * @brief Generate the mask vectors ([1.0, ..., 1.0, 0.0 ... 0.0]) for the * various input lengths, such that all ones will appear up to the given * length for each item, and all zeros will follow. * * @param lengthsDevice The input lengths on the device (must be of length * batchSize). * @param maskLength The length of each mask. * @param batchSize The size of the batch. * @param maskDevice The location to output the mask (must be of length * maskLength*batchSize). * @param stream The stream to execute on. */ static void generate( const int32_t* lengthsDevice, int maskLength, int batchSize, float* maskDevice, cudaStream_t stream); }; } // namespace tts #endif
PyTorch/Classification/ConvNets/efficientnet/training/AMP
AMP
DGXA100_efficientnet-b4_AMP
python ./multiproc.py --nproc_per_node 8 ./launch.py --model efficientnet-b4 --precision AMP --mode convergence --platform DGXA100 /imagenet --workspace ${1:-./} --raport-file raport.json
TensorFlow2/Segmentation/UNet_Medical/examples
examples
unet_TRAIN_FULL
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script launches U-Net training in FP32 and runs 5-fold cross-validation for 6400 iterations per fold.
# Usage:
# bash unet_TRAIN_FULL.sh <number of GPUs> <path to dataset> <path to results directory> <batch size>

horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_every 100 --max_steps 6400 --batch_size $4 --exec_mode train_and_evaluate --fold 0 --augment --xla > $3/log_FP32_${1}GPU_fold0.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_every 100 --max_steps 6400 --batch_size $4 --exec_mode train_and_evaluate --fold 1 --augment --xla > $3/log_FP32_${1}GPU_fold1.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_every 100 --max_steps 6400 --batch_size $4 --exec_mode train_and_evaluate --fold 2 --augment --xla > $3/log_FP32_${1}GPU_fold2.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_every 100 --max_steps 6400 --batch_size $4 --exec_mode train_and_evaluate --fold 3 --augment --xla > $3/log_FP32_${1}GPU_fold3.txt
horovodrun -np $1 python main.py --data_dir $2 --model_dir $3 --log_every 100 --max_steps 6400 --batch_size $4 --exec_mode train_and_evaluate --fold 4 --augment --xla > $3/log_FP32_${1}GPU_fold4.txt
python runtime/parse_results.py --model_dir $3 --exec_mode convergence --env FP32_${1}GPU
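For reference, a concrete invocation following the usage pattern above might look like the line below; the GPU count, paths, and batch size are placeholder values chosen for illustration, not values prescribed by the repository:

bash unet_TRAIN_FULL.sh 8 /data /results 8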
Tools/PyTorch/TimeSeriesPredictionPlatform/conf
conf
train_derived_fields
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # @package _global_ dataset: config: # The line below is equivalent to python's `model.config.get(model_type, 'default') == 'graph' and dataset.config.get('graph', False)` # For more info on resolvers see: https://omegaconf.readthedocs.io/en/2.1_branch/custom_resolvers.html # We cannot reuse `graph: ...` because during resolution it queries dataset.config.graph which causes infinite recursion construct_graph: ${and:${cmp:${oc.select:model.config.model_type,default},graph},${oc.select:dataset.config.graph,false}} xgb: ${cont.lower:${oc.select:trainer._target_, ctltrainer}, xgbtrainer} stat: ${cont.lower:${oc.select:trainer._target_, ctltrainer}, stattrainer} trainer: criterion: reduction: ${if:${feature.selector:${dataset.config.features}, WEIGHT, CONTINUOUS},none,mean} config: encoder_length: ${dataset.config.encoder_length} example_length: ${dataset.config.example_length} model_type: ${oc.select:model.config.model_type,default} evaluator: config: preprocessor_state_path: ${dataset.config.dest_path}/tspp_preprocess.bin device: ${trainer.config.device} encoder_length: ${dataset.config.encoder_length} output_selector: ${oc.select:model.config.output_selector,0} model_type: ${oc.select:model.config.model_type,default} # We want to inform model about shape of the data model: config: device: ${trainer.config.device} encoder_length: ${dataset.config.encoder_length} example_length: ${dataset.config.example_length} num_ts: ${dataset.config.time_series_count} temporal_known_continuous_inp_size: ${len:${feature.selector:${dataset.config.features}, KNOWN, CONTINUOUS}} temporal_observed_continuous_inp_size: ${if:${dataset.config.MultiID},${add:${len:${feature.selector:${dataset.config.features}, OBSERVED, CONTINUOUS}},${dataset.config.time_series_count}},${len:${feature.selector:${dataset.config.features}, OBSERVED, CONTINUOUS}}} static_continuous_inp_size: ${len:${feature.selector:${dataset.config.features}, STATIC, CONTINUOUS}} temporal_target_size: ${len:${feature.selector:${dataset.config.features}, TARGET, CONTINUOUS}} # XXX: we currently support only continuous targets static_categorical_inp_lens: ${feature.cardinalities:${feature.selector:${dataset.config.features}, STATIC, CATEGORICAL}} temporal_known_categorical_inp_lens: ${feature.cardinalities:${feature.selector:${dataset.config.features}, KNOWN, CATEGORICAL}} temporal_observed_categorical_inp_lens: ${feature.cardinalities:${feature.selector:${dataset.config.features}, OBSERVED, CATEGORICAL}} num_static_vars: ${sum:${model.config.static_continuous_inp_size},${len:${model.config.static_categorical_inp_lens}}} num_future_vars: ${sum:${model.config.temporal_known_continuous_inp_size},${len:${model.config.temporal_known_categorical_inp_lens}}} num_historic_vars: ${sum:${model.config.num_future_vars},${model.config.temporal_observed_continuous_inp_size},${model.config.temporal_target_size},${len:${model.config.temporal_observed_categorical_inp_lens}}}
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks
networks
encoder_scaffold_test
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for transformer-based text encoder network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.modeling import activations from official.nlp.modeling import layers from official.nlp.modeling.networks import encoder_scaffold # Test class that wraps a standard transformer layer. If this layer is called # at any point, the list passed to the config object will be filled with a # boolean 'True'. We register this class as a Keras serializable so we can # test serialization below. # @tf.keras.utils.register_keras_serializable(package="TestOnly") class ValidatedTransformerLayer(layers.Transformer): def __init__(self, call_list, **kwargs): super(ValidatedTransformerLayer, self).__init__(**kwargs) self.list = call_list def call(self, inputs): self.list.append(True) return super(ValidatedTransformerLayer, self).call(inputs) def get_config(self): config = super(ValidatedTransformerLayer, self).get_config() config["call_list"] = [] return config # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class EncoderScaffoldLayerClassTest(keras_parameterized.TestCase): def test_network_creation(self): hidden_size = 32 sequence_length = 21 num_hidden_instances = 3 embedding_cfg = { "vocab_size": 100, "type_vocab_size": 16, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } call_list = [] hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", "call_list": call_list } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=num_hidden_instances, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cls=ValidatedTransformerLayer, hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) # Create the inputs (note that the first dimension is implicit). 
word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # The default output dtype is float32. self.assertAllEqual(tf.float32, data.dtype) self.assertAllEqual(tf.float32, pooled.dtype) # If call_list[0] exists and is True, the passed layer class was # instantiated from the given config properly. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_network_creation_with_float16_dtype(self): tf.keras.mixed_precision.experimental.set_policy("mixed_float16") hidden_size = 32 sequence_length = 21 embedding_cfg = { "vocab_size": 100, "type_vocab_size": 16, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, "dtype": "float16", } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float16", } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), classification_layer_dtype=tf.float16, hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) expected_data_shape = [None, sequence_length, hidden_size] expected_pooled_shape = [None, hidden_size] self.assertAllEqual(expected_data_shape, data.shape.as_list()) self.assertAllEqual(expected_pooled_shape, pooled.shape.as_list()) # If float_dtype is set to float16, the output should always be float16. self.assertAllEqual(tf.float16, data.dtype) self.assertAllEqual(tf.float16, pooled.dtype) def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", } tf.keras.mixed_precision.experimental.set_policy("float32") print(hidden_cfg) print(embedding_cfg) # Create a small EncoderScaffold for testing. 
test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) _ = model.predict([word_id_data, mask_data, type_id_data]) # Creates a EncoderScaffold with max_sequence_length != sequence_length num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length * 2, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) _ = model.predict([word_id_data, mask_data, type_id_data]) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. hidden_size = 32 sequence_length = 21 embedding_cfg = { "vocab_size": 100, "type_vocab_size": 16, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, } hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", } # Create a small EncoderScaffold for testing. network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cfg=embedding_cfg) # Create another network object from the first object's config. new_network = encoder_scaffold.EncoderScaffold.from_config( network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. 
self.assertAllEqual(network.get_config(), new_network.get_config()) @keras_parameterized.run_all_keras_modes class EncoderScaffoldEmbeddingNetworkTest(keras_parameterized.TestCase): def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 # Build an embedding network to swap in for the default network. This one # will have 2 inputs (mask and word_ids) instead of 3, and won't use # positional embeddings. word_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name="input_word_ids") mask = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name="input_mask") embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=hidden_size, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), name="word_embeddings") word_embeddings = embedding_layer(word_ids) network = tf.keras.Model([word_ids, mask], [word_embeddings, mask]) hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", } # Create a small EncoderScaffold for testing. test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cls=network, embedding_data=embedding_layer.embeddings) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) _ = model.predict([word_id_data, mask_data]) # Test that we can get the embedding data that we passed to the object. This # is necessary to support standard language model training. self.assertIs(embedding_layer.embeddings, test_network.get_embedding_table()) def test_serialize_deserialize(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 # Build an embedding network to swap in for the default network. This one # will have 2 inputs (mask and word_ids) instead of 3, and won't use # positional embeddings. word_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name="input_word_ids") mask = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name="input_mask") embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=hidden_size, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), name="word_embeddings") word_embeddings = embedding_layer(word_ids) network = tf.keras.Model([word_ids, mask], [word_embeddings, mask]) hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", } # Create a small EncoderScaffold for testing. 
test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cfg=hidden_cfg, embedding_cls=network, embedding_data=embedding_layer.embeddings) # Create another network object from the first object's config. new_network = encoder_scaffold.EncoderScaffold.from_config( test_network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(test_network.get_config(), new_network.get_config()) # Create a model based off of the old and new networks: word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = new_network([word_ids, mask]) new_model = tf.keras.Model([word_ids, mask], [data, pooled]) data, pooled = test_network([word_ids, mask]) model = tf.keras.Model([word_ids, mask], [data, pooled]) # Copy the weights between models. new_model.set_weights(model.get_weights()) # Invoke the models. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) data, cls = model.predict([word_id_data, mask_data]) new_data, new_cls = new_model.predict([word_id_data, mask_data]) # The output should be equal. self.assertAllEqual(data, new_data) self.assertAllEqual(cls, new_cls) # We should not be able to get a reference to the embedding data. with self.assertRaisesRegex(RuntimeError, ".*does not have a reference.*"): new_network.get_embedding_table() @keras_parameterized.run_all_keras_modes class EncoderScaffoldHiddenInstanceTest(keras_parameterized.TestCase): def test_network_invocation(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, "dtype": "float32", } call_list = [] hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", "call_list": call_list } # Create a small EncoderScaffold for testing. This time, we pass an already- # instantiated layer object. xformer = ValidatedTransformerLayer(**hidden_cfg) test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cls=xformer, embedding_cfg=embedding_cfg) # Create the inputs (note that the first dimension is implicit). word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = test_network([word_ids, mask, type_ids]) # Create a model based off of this network: model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Invoke the model. We can't validate the output data here (the model is too # complex) but this will catch structural runtime errors. 
batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) _ = model.predict([word_id_data, mask_data, type_id_data]) # If call_list[0] exists and is True, the passed layer class was # called as part of the graph creation. self.assertNotEmpty(call_list) self.assertTrue(call_list[0], "The passed layer class wasn't instantiated.") def test_serialize_deserialize(self): hidden_size = 32 sequence_length = 21 vocab_size = 57 num_types = 7 embedding_cfg = { "vocab_size": vocab_size, "type_vocab_size": num_types, "hidden_size": hidden_size, "seq_length": sequence_length, "max_seq_length": sequence_length, "initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dropout_rate": 0.1, "dtype": "float32", } call_list = [] hidden_cfg = { "num_attention_heads": 2, "intermediate_size": 3072, "intermediate_activation": activations.gelu, "dropout_rate": 0.1, "attention_dropout_rate": 0.1, "kernel_initializer": tf.keras.initializers.TruncatedNormal(stddev=0.02), "dtype": "float32", "call_list": call_list } # Create a small EncoderScaffold for testing. This time, we pass an already- # instantiated layer object. xformer = ValidatedTransformerLayer(**hidden_cfg) test_network = encoder_scaffold.EncoderScaffold( num_hidden_instances=3, num_output_classes=hidden_size, classification_layer_initializer=tf.keras.initializers.TruncatedNormal( stddev=0.02), hidden_cls=xformer, embedding_cfg=embedding_cfg) # Create another network object from the first object's config. new_network = encoder_scaffold.EncoderScaffold.from_config( test_network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. self.assertAllEqual(test_network.get_config(), new_network.get_config()) # Create a model based off of the old and new networks: word_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) mask = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) type_ids = tf.keras.Input(shape=(sequence_length,), dtype=tf.int32) data, pooled = new_network([word_ids, mask, type_ids]) new_model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) data, pooled = test_network([word_ids, mask, type_ids]) model = tf.keras.Model([word_ids, mask, type_ids], [data, pooled]) # Copy the weights between models. new_model.set_weights(model.get_weights()) # Invoke the models. batch_size = 3 word_id_data = np.random.randint( vocab_size, size=(batch_size, sequence_length)) mask_data = np.random.randint(2, size=(batch_size, sequence_length)) type_id_data = np.random.randint( num_types, size=(batch_size, sequence_length)) data, cls = model.predict([word_id_data, mask_data, type_id_data]) new_data, new_cls = new_model.predict( [word_id_data, mask_data, type_id_data]) # The output should be equal. self.assertAllEqual(data, new_data) self.assertAllEqual(cls, new_cls) if __name__ == "__main__": assert tf.version.VERSION.startswith('2.') tf.test.main()
PyTorch/Recommendation/DLRM/triton
triton
README
# Deploying the DLRM model using Triton Inference Server

This folder contains instructions for deployment and an example client application to run inference on Triton Inference Server, as well as a detailed performance analysis.

## Table Of Contents

* [Solution Overview](#solution-overview)
* [Quick Start Guide](#quick-start-guide)
  * [Running Triton Inference Server and client](#running-triton-inference-server-and-client)
* [Performance](#performance)
  * [Latency vs Throughput](#throughputlatency-results)
* [Advanced](#advanced)
  * [Dynamic batching support](#dynamic-batching-support)

## Solution Overview

The [NVIDIA Triton Inference Server](https://github.com/NVIDIA/triton-inference-server) provides a datacenter and cloud inferencing solution optimized for NVIDIA GPUs. The server provides an inference service via an HTTP or gRPC endpoint, allowing remote clients to request inferencing for any number of GPU or CPU models being managed by the server.

## Quick Start Guide

### Running Triton Inference Server and client

The very first step of deployment is to acquire a trained checkpoint and the model configuration for that checkpoint. Default model configurations are stored inside the `dlrm/config` directory.

**Currently, our implementation only supports TorchScript deployment for models that fit into the memory of a single GPU.**

You can read more about training DLRM models on different dataset configurations, based on the frequency threshold used in the preprocessing step, in the [README](https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Recommendation/DLRM/README.md#preprocessing-on-gpu).

#### Inference container

Every command below is run from a special inference container. To build that container, go to the main repository folder and call

`docker build -t dlrm-inference . -f triton/Dockerfile`

This command will download dependencies and build the inference container. Then run a shell inside the container:

`docker run -it --rm --gpus device=0 --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 --net=host -v <PATH_TO_MODEL_REPOSITORY>:/repository -v <PATH_TO_MODEL_CHECKPOINT>:/results/checkpoint -v <PATH_TO_DATASET>:/data dlrm-inference bash`

Here `--gpus '"device=0,1,2,3"'` selects GPUs indexed by ordinals `0,1,2` and `3`, respectively. The server will see only these GPUs. If you write `device=all`, then the server will see all the available GPUs. `PATH_TO_MODEL_REPOSITORY` indicates the location where deployed models are stored.

#### Deploying the model

To deploy the model into a Triton-compatible format, use `deployer.py`. This script is meant to be run from the deployment docker container.

```
usage: deployer.py [-h] (--ts-script | --ts-trace | --onnx) [--triton-no-cuda]
                   [--triton-model-name TRITON_MODEL_NAME]
                   [--triton-model-version TRITON_MODEL_VERSION]
                   [--triton-max-batch-size TRITON_MAX_BATCH_SIZE]
                   [--triton-dyn-batching-delay TRITON_DYN_BATCHING_DELAY]
                   [--triton-engine-count TRITON_ENGINE_COUNT]
                   [--save-dir SAVE_DIR]
                   [--deploy_cpu]
                   ...

optional arguments:
  -h, --help            show this help message and exit
  --ts-script           convert to torchscript using torch.jit.script
  --ts-trace            convert to torchscript using torch.jit.trace
  --onnx                convert to onnx using torch.onnx.export
  --deploy_cpu

triton related flags:
  --triton-no-cuda      Use the CPU for tracing.
  --triton-model-name TRITON_MODEL_NAME
                        exports to appropriate directory structure for triton
  --triton-model-version TRITON_MODEL_VERSION
                        exports to appropriate directory structure for triton
  --triton-max-batch-size TRITON_MAX_BATCH_SIZE
                        Specifies the 'max_batch_size' in the triton model config.
                        See the triton documentation for more info.
  --triton-dyn-batching-delay TRITON_DYN_BATCHING_DELAY
                        Determines the dynamic_batching queue delay in milliseconds(ms)
                        for the triton model config. Use '0' or '-1' to specify static
                        batching. See the triton documentation for more info.
  --triton-engine-count TRITON_ENGINE_COUNT
                        Specifies the 'instance_group' count value in the triton model
                        config. See the triton documentation for more info.
  --save-dir SAVE_DIR   Saved model directory

other flags:
  model_arguments       arguments that will be ignored by deployer lib and will be
                        forwarded to your deployer script
```

The following model-specific arguments have to be specified for model deployment:

```
  --embedding_dim EMBEDDING_DIM
                        Embedding dimensionality.
  --top_mlp_sizes TOP_MLP_SIZES [TOP_MLP_SIZES ...]
                        Units in layers of top MLP (default: 1024 1024 512 256 1).
  --bottom_mlp_sizes BOTTOM_MLP_SIZES [BOTTOM_MLP_SIZES ...]
                        Units in layers of bottom MLP (default: 512 256 128).
  --interaction_op {cat,dot}
                        Interaction operator to use.
  --dataset DATASET     Path to dataset directory containing the model_size.json file
                        describing input sizes for each embedding layer.
  --batch_size BATCH_SIZE
                        Internal dataloader batch size, usually the same as the batch
                        size specified in the --triton-max-batch-size flag.
  --fp16                Set a model for fp16 deployment.
  --dump_perf_data DIRECTORY_NAME
                        Dump binary performance data that can be loaded by the perf client.
  --model_checkpoint MODEL_CHECKPOINT
                        Checkpoint file with the trained model that is going to be deployed.
  --cpu                 Export a cpu model instead of gpu.
```

For example, to deploy the model in TorchScript format, using half precision and a max batch size of 4096, under the name `dlrm-ts-trace-16`, execute:

`python -m triton.deployer --ts-trace --triton-model-name dlrm-ts-trace-16 --triton-max-batch-size 4096 --save-dir /repository -- --model_checkpoint /results/checkpoint --fp16 --batch_size 4096 --num_numerical_features 13 --embedding_dim 128 --top_mlp_sizes 1024 1024 512 256 1 --bottom_mlp_sizes 512 256 128 --interaction_op dot --dataset /data`

Here `model_checkpoint` is a checkpoint for a trained model with the same configuration as used during export, and the dataset (or at least the dataset configuration) is mounted under `/data`.

#### Running the Triton server

**NOTE: This step is executed outside the inference container.**

1. `docker pull nvcr.io/nvidia/tritonserver:20.09-py3`
2. `docker run -d --rm --gpus device=0 --ipc=host --network=host [--cpuset-cpus=0-15] -p 8000:8000 -p 8001:8001 -p 8002:8002 -v <PATH_TO_MODEL_REPOSITORY>:/models nvcr.io/nvidia/tritonserver:20.09-py3 tritonserver --model-repository=/models --log-verbose=1 --model-control-mode=explicit`

Here `--gpus '"device=0,1,2,3"'` selects GPUs indexed by ordinals `0,1,2` and `3`, respectively. The server will see only these GPUs. If you write `device=all`, then the server will see all the available GPUs. `PATH_TO_MODEL_REPOSITORY` indicates the location where deployed models are stored. The additional `--model-control-mode` option allows you to manually load and unload models. This is especially useful when dealing with numerous large models like DLRM.

For models exported to the ONNX format and hosted inside the ONNX runtime, it might be required to limit the visible CPUs to fully utilize GPU acceleration.
Use the `--cpuset-cpus` docker option for that.

#### Running the client

The example client, `client.py`, allows you to check model performance against synthetic or real validation data. The client connects to the Triton server and performs inference.

```
usage: client.py [-h] --triton-server-url TRITON_SERVER_URL
                 --triton-model-name TRITON_MODEL_NAME
                 [--triton-model-version TRITON_MODEL_VERSION] [-v]
                 [-H HTTP_HEADER] --dataset_config DATASET_CONFIG
                 [--inference_data INFERENCE_DATA] [--batch_size BATCH_SIZE]
                 [--drop_last_batch DROP_LAST_BATCH] [--fp16]
                 [--test_batches TEST_BATCHES]

optional arguments:
  -h, --help            show this help message and exit
  --triton-server-url TRITON_SERVER_URL
                        URL address of the Triton server (with port)
  --triton-model-name TRITON_MODEL_NAME
                        Triton deployed model name
  --triton-model-version TRITON_MODEL_VERSION
                        Triton model version
  -v, --verbose         Enable verbose output
  -H HTTP_HEADER        HTTP headers to add to inference server requests.
                        Format is -H"Header:Value".
  --dataset_config DATASET_CONFIG
  --inference_data INFERENCE_DATA
                        Path to file with inference data.
  --batch_size BATCH_SIZE
                        Inference request batch size
  --drop_last_batch DROP_LAST_BATCH
                        Drops the last batch if it is not full
  --fp16                Use 16bit for numerical input
  --test_batches TEST_BATCHES
                        Specifies the number of batches used in the inference
```

To run inference on the model exported in the previous steps, using data located under `/data/test`, execute:

`python -m triton.client --triton-server-url localhost:8000 --triton-model-name dlrm-ts-trace-16 --dataset_config /data/model_size.json --inference_data /data/test --batch_size 4096 --fp16`

#### Gathering performance data

Performance data can be gathered using the `perf_client` tool. To use this tool, performance data needs to be dumped during deployment. To do that, use the `--dump_perf_data` option of the deployer:

`python -m triton.deployer --ts-trace --triton-model-name dlrm-ts-trace-16 --triton-max-batch-size 4096 --save-dir /repository -- --model_checkpoint /results/checkpoint --fp16 --batch_size 4096 --num_numerical_features 13 --embedding_dim 128 --top_mlp_sizes 1024 1024 512 256 1 --bottom_mlp_sizes 512 256 128 --interaction_op dot --dataset /data --dump_perf_data /location/for/perfdata`

Once the performance data is saved, `perf_client` can be used with the following command:

`/workspace/bin/perf_client --max-threads 10 -m dlrm-ts-trace-16 -x 1 -p 5000 -v -i gRPC -u localhost:8001 -b 4096 -l 5000 --concurrency-range 1 --input-data /location/for/perfdata -f result.csv`

For more information about `perf_client`, please refer to the [official documentation](https://docs.nvidia.com/deeplearning/sdk/triton-inference-server-master-branch-guide/docs/optimization.html#perf-client). A short sketch for summarizing the resulting CSV is included just before the result tables below.

## Performance

The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).

### Throughput/Latency results

Throughput is measured in recommendations/second, and latency in milliseconds.
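The numbers in the tables below were collected with `perf_client` as described above. As a convenience, the CSV written via the `-f result.csv` flag can be summarized with a short script. The sketch below is not part of the repository, and the column names (`Concurrency`, `Inferences/Second`, `p95 latency`) are assumptions that may differ between Triton releases, so adjust them to match the header of your `result.csv`:

```
# summarize_perf.py - minimal sketch for summarizing perf_client CSV output.
# The column names used here are assumptions; check the header of result.csv.
import csv
import sys


def summarize(path):
    with open(path, newline="") as csvfile:
        for row in csv.DictReader(csvfile):
            concurrency = row.get("Concurrency", "?")
            throughput = row.get("Inferences/Second", "?")
            p95_us = row.get("p95 latency", "?")  # latencies are reported in microseconds
            print(f"concurrency={concurrency} throughput={throughput} infer/s p95={p95_us} us")


if __name__ == "__main__":
    summarize(sys.argv[1] if len(sys.argv) > 1 else "result.csv")
```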
**TorchScript FP16 inference (V100-32G)**

| Batch Size | Throughput [samples / s] | Median Latency [ms] | 95% latency [ms] | 99% latency [ms] |
|--------:|--------------------:|--------------:|--------------:|---------------:|
| 1 | 1019 | 0.966 | 1.027 | 1.082 |
| 2 | 2119 | 0.989 | 1.047 | 1.086 |
| 4 | 3340 | 1.199 | 1.277 | 1.290 |
| 8 | 6641 | 1.197 | 1.284 | 1.314 |
| 16 | 12.5k | 1.082 | 1.196 | 1.214 |
| 32 | 28k | 1.133 | 1.271 | 1.291 |
| 64 | 45k | 1.413 | 1.489 | 1.551 |
| 128 | 105k | 1.223 | 1.270 | 1.290 |
| 256 | 193.6k | 1.320 | 1.471 | 1.518 |
| 512 | 376k | 1.367 | 1.449 | 1.486 |
| 1024 | 740k | 1.379 | 1.463 | 1.536 |
| 2048 | 1.105M | 1.817 | 2.106 | 2.195 |
| 4096 | 1.488M | 2.730 | 2.851 | 3.266 |
| 8192 | 1.676M | 4.851 | 5.174 | 5.486 |
| 16384 | 1.831M | 8.926 | 9.127 | 9.415 |
| 32768 | 1.756M | 18.543 | 19.625 | 20.223 |
| 65536 | 1.678M | 38.950 | 41.112 | 41.985 |
| 131072 | 1.547M | 86.258 | 90.772 | 92.511 |

**TorchScript FP32 inference (V100-32G)**

| Batch Size | Throughput [samples / s] | Median Latency [ms] | 95% latency [ms] | 99% latency [ms] |
|--------:|--------------------:|--------------:|--------------:|---------------:|
| 1 | 1153 | 0.855 | 0.909 | 0.929 |
| 2 | 2084 | 0.950 | 1.042 | 1.199 |
| 4 | 4105 | 0.955 | 1.033 | 1.177 |
| 8 | 8356 | 0.943 | 1.029 | 1.179 |
| 16 | 16.8k | 0.942 | 1.009 | 1.158 |
| 32 | 28.3k | 1.134 | 1.274 | 1.336 |
| 64 | 54.7k | 1.214 | 1.307 | 1.353 |
| 128 | 118k | 1.036 | 1.255 | 1.303 |
| 256 | 202k | 1.275 | 1.404 | 1.449 |
| 512 | 401k | 1.286 | 1.365 | 1.397 |
| 1024 | 707k | 1.448 | 1.518 | 1.550 |
| 2048 | 833k | 2.450 | 2.547 | 2.610 |
| 4096 | 1.013M | 3.996 | 4.361 | 4.683 |
| 8192 | 1.091M | 7.333 | 7.951 | 8.115 |
| 16384 | 1.180M | 13.8 | 14.462 | 15.053 |
| 32768 | 1.173M | 27.927 | 28.655 | 28.841 |
| 65536 | 1.140M | 57.046 | 58.627 | 58.861 |
| 131072 | 1.074M | 120.982 | 122.193 | 122.337 |

**TorchScript FP32 inference CPU (2x E5-2698 v4 @ 2.20GHz)**

| Batch Size | Throughput [samples / s] | Avg Latency [ms] | 95% latency [ms] | 99% latency [ms] |
|--------:|--------------------:|--------------:|--------------:|---------------:|
| 1 | 923.2 | 1.051 | 1.195 | 1.225 |
| 2 | 1660.8 | 1.204 | 1.486 | 1.597 |
| 4 | 3553.6 | 1.095 | 1.456 | 1.65 |
| 8 | 6692.8 | 1.112 | 1.56 | 1.787 |
| 16 | 11.8k | 1.317 | 1.545 | 1.698 |
| 32 | 19.2k | 1.636 | 1.851 | 2.261 |
| 64 | 28.6k | 2.203 | 2.403 | 2.615 |
| 128 | 37.3k | 3.333 | 3.968 | 4.143 |
| 256 | 46.5k | 5.286 | 6.538 | 7.102 |
| 512 | 63.5k | 7.962 | 8.256 | 10.052 |
| 1024 | 85.8k | 10.777 | 16.563 | 17.917 |
| 2048 | 117k | 17.169 | 19.441 | 26.955 |
| 4096 | 95.8k | 41.988 | 45.996 | 50.483 |
| 8192 | 85.1k | 92.251 | 131.333 | 133.578 |
| 16384 | 88.5k | 187.677 | 204.676 | 231.393 |
| 32768 | 78.6k | 408.815 | 428.574 | 429.58 |
| 65536 | 91.8k | 804.059 | 810.328 | 810.328 |
| 131072 | 78.6k | 1606.89 | 1635.36 | 1635.36 |

![Latency vs Throughput](./img/lat_vs_thr.png)

The plot above shows that the GPU is saturated at batch size 4096. However, running inference with larger batches might still be faster than issuing several separate inference requests. Therefore, we choose 65536 as the optimal batch size.

## Advanced

### Dynamic batching support

The Triton server has a built-in dynamic batching mechanism that can be enabled. When it is enabled, the server creates inference batches from the received requests. Since the output of the model is a single probability, the batch size of a single request may be large. Here it is assumed to be 4096.
With dynamic batching enabled, the server will concatenate requests of this size into an inference batch. The upper bound on the size of the inference batch is set to 65536. All these parameters are configurable.

Performance results on a single V100-32G (ONNX FP16 model) for various numbers of simultaneous requests are shown in the figure below.

![Dynamic batching](./img/dyn_batch_concurrency.png)

The plot above shows that, with a 20 ms upper bound on latency, a single GPU can handle up to 8 concurrent requests. This leads to a total throughput of 1,776,030 recommendations/sec, which means 35,520 recommendations within 20 ms on a single GPU.
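As a sanity check on the numbers quoted in this document, throughput, batch size, concurrency, and latency are tied together by `throughput ≈ batch_size * concurrency / latency`. The following sketch is not part of the repository; it only reproduces two of the figures above, the FP16 throughput at batch size 4096 and the number of recommendations served within the 20 ms latency budget:

```
# back_of_envelope.py - reproduces two figures quoted in this document.


def throughput(batch_size, latency_ms, concurrency=1):
    """Approximate throughput in samples/s for a given latency and concurrency."""
    return batch_size * concurrency / (latency_ms / 1000.0)


# TorchScript FP16, batch size 4096, median latency 2.730 ms, one request in flight:
print(f"{throughput(4096, 2.730):,.0f} samples/s")  # ~1,500,366, close to the 1.488M in the table

# Dynamic batching: 1,776,030 recommendations/s sustained over a 20 ms window:
print(f"{1_776_030 * 20 // 1000:,} recommendations per 20 ms")  # 35,520
```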
TensorFlow/Segmentation/UNet_Industrial/scripts/benchmarking
benchmarking
UNet_trainbench_AMP_1GPU
#!/usr/bin/env bash

# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script launches the UNet training benchmark in TF-AMP on 1 GPU with a batch size of 16 (16 per GPU).
# Usage: ./UNet_trainbench_AMP_1GPU.sh <path to dataset> <dagm classID (1-10)>

BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"

export TF_CPP_MIN_LOG_LEVEL=3

# Cleaning up for benchmark
RESULT_DIR="/tmp"
rm -rf "${RESULT_DIR}"

python "${BASEDIR}/../../main.py" \
    --unet_variant='tinyUNet' \
    --activation_fn='relu' \
    --exec_mode='training_benchmark' \
    --iter_unit='batch' \
    --num_iter=1500 \
    --batch_size=16 \
    --warmup_step=500 \
    --results_dir="${RESULT_DIR}" \
    --data_dir="${1}" \
    --dataset_name='DAGM2007' \
    --dataset_classID="${2}" \
    --data_format='NCHW' \
    --use_auto_loss_scaling \
    --amp \
    --xla \
    --learning_rate=1e-4 \
    --learning_rate_decay_factor=0.8 \
    --learning_rate_decay_steps=500 \
    --rmsprop_decay=0.9 \
    --rmsprop_momentum=0.8 \
    --loss_fn_name='adaptive_loss' \
    --weight_decay=1e-5 \
    --weight_init_method='he_uniform' \
    --augment_data \
    --display_every=250 \
    --debug_verbosity=0
PyTorch/Segmentation/MaskRCNN/pytorch/configs/quick_schedules
quick_schedules
e2e_mask_rcnn_R_50_FPN_quick
MODEL: META_ARCHITECTURE: "GeneralizedRCNN" WEIGHT: "catalog://ImageNetPretrained/MSRA/R-50" BACKBONE: CONV_BODY: "R-50-FPN" OUT_CHANNELS: 256 RPN: USE_FPN: True ANCHOR_STRIDE: (4, 8, 16, 32, 64) PRE_NMS_TOP_N_TRAIN: 2000 PRE_NMS_TOP_N_TEST: 1000 POST_NMS_TOP_N_TEST: 1000 FPN_POST_NMS_TOP_N_TEST: 1000 ROI_HEADS: USE_FPN: True BATCH_SIZE_PER_IMAGE: 256 ROI_BOX_HEAD: POOLER_RESOLUTION: 7 POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) POOLER_SAMPLING_RATIO: 2 FEATURE_EXTRACTOR: "FPN2MLPFeatureExtractor" PREDICTOR: "FPNPredictor" ROI_MASK_HEAD: POOLER_SCALES: (0.25, 0.125, 0.0625, 0.03125) FEATURE_EXTRACTOR: "MaskRCNNFPNFeatureExtractor" PREDICTOR: "MaskRCNNC4Predictor" POOLER_RESOLUTION: 14 POOLER_SAMPLING_RATIO: 2 RESOLUTION: 28 SHARE_BOX_FEATURE_EXTRACTOR: False MASK_ON: True DATASETS: TRAIN: ("coco_2014_minival",) TEST: ("coco_2014_minival",) INPUT: MIN_SIZE_TRAIN: 600 MAX_SIZE_TRAIN: 1000 MIN_SIZE_TEST: 800 MAX_SIZE_TEST: 1000 DATALOADER: SIZE_DIVISIBILITY: 32 SOLVER: BASE_LR: 0.005 WEIGHT_DECAY: 0.0001 STEPS: (1500,) MAX_ITER: 2000 IMS_PER_BATCH: 4 TEST: IMS_PER_BATCH: 2
TensorFlow/Detection/SSD/models/research/slim/nets
nets
mobilenet_v1_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tests for MobileNet v1.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from nets import mobilenet_v1 slim = tf.contrib.slim class MobilenetV1Test(tf.test.TestCase): def testBuildClassificationNetwork(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith( 'MobilenetV1/Logits/SpatialSqueeze')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) self.assertTrue('Predictions' in end_points) self.assertListEqual(end_points['Predictions'].get_shape().as_list(), [batch_size, num_classes]) def testBuildPreLogitsNetwork(self): batch_size = 5 height, width = 224, 224 num_classes = None inputs = tf.random_uniform((batch_size, height, width, 3)) net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool')) self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024]) self.assertFalse('Logits' in end_points) self.assertFalse('Predictions' in end_points) def testBuildBaseNetwork(self): batch_size = 5 height, width = 224, 224 inputs = tf.random_uniform((batch_size, height, width, 3)) net, end_points = mobilenet_v1.mobilenet_v1_base(inputs) self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13')) self.assertListEqual(net.get_shape().as_list(), [batch_size, 7, 7, 1024]) expected_endpoints = ['Conv2d_0', 'Conv2d_1_depthwise', 'Conv2d_1_pointwise', 'Conv2d_2_depthwise', 'Conv2d_2_pointwise', 'Conv2d_3_depthwise', 'Conv2d_3_pointwise', 'Conv2d_4_depthwise', 'Conv2d_4_pointwise', 'Conv2d_5_depthwise', 'Conv2d_5_pointwise', 'Conv2d_6_depthwise', 'Conv2d_6_pointwise', 'Conv2d_7_depthwise', 'Conv2d_7_pointwise', 'Conv2d_8_depthwise', 'Conv2d_8_pointwise', 'Conv2d_9_depthwise', 'Conv2d_9_pointwise', 'Conv2d_10_depthwise', 'Conv2d_10_pointwise', 'Conv2d_11_depthwise', 'Conv2d_11_pointwise', 'Conv2d_12_depthwise', 'Conv2d_12_pointwise', 'Conv2d_13_depthwise', 'Conv2d_13_pointwise'] self.assertItemsEqual(end_points.keys(), expected_endpoints) def testBuildOnlyUptoFinalEndpoint(self): batch_size = 5 height, width = 224, 224 endpoints = ['Conv2d_0', 'Conv2d_1_depthwise', 'Conv2d_1_pointwise', 'Conv2d_2_depthwise', 'Conv2d_2_pointwise', 'Conv2d_3_depthwise', 'Conv2d_3_pointwise', 'Conv2d_4_depthwise', 'Conv2d_4_pointwise', 'Conv2d_5_depthwise', 'Conv2d_5_pointwise', 'Conv2d_6_depthwise', 'Conv2d_6_pointwise', 'Conv2d_7_depthwise', 'Conv2d_7_pointwise', 'Conv2d_8_depthwise', 'Conv2d_8_pointwise', 'Conv2d_9_depthwise', 'Conv2d_9_pointwise', 'Conv2d_10_depthwise', 'Conv2d_10_pointwise', 'Conv2d_11_depthwise', 'Conv2d_11_pointwise', 
'Conv2d_12_depthwise', 'Conv2d_12_pointwise', 'Conv2d_13_depthwise', 'Conv2d_13_pointwise'] for index, endpoint in enumerate(endpoints): with tf.Graph().as_default(): inputs = tf.random_uniform((batch_size, height, width, 3)) out_tensor, end_points = mobilenet_v1.mobilenet_v1_base( inputs, final_endpoint=endpoint) self.assertTrue(out_tensor.op.name.startswith( 'MobilenetV1/' + endpoint)) self.assertItemsEqual(endpoints[:index+1], end_points.keys()) def testBuildCustomNetworkUsingConvDefs(self): batch_size = 5 height, width = 224, 224 conv_defs = [ mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128), mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512) ] inputs = tf.random_uniform((batch_size, height, width, 3)) net, end_points = mobilenet_v1.mobilenet_v1_base( inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs) self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3')) self.assertListEqual(net.get_shape().as_list(), [batch_size, 56, 56, 512]) expected_endpoints = ['Conv2d_0', 'Conv2d_1_depthwise', 'Conv2d_1_pointwise', 'Conv2d_2_depthwise', 'Conv2d_2_pointwise', 'Conv2d_3_depthwise', 'Conv2d_3_pointwise'] self.assertItemsEqual(end_points.keys(), expected_endpoints) def testBuildAndCheckAllEndPointsUptoConv2d_13(self): batch_size = 5 height, width = 224, 224 inputs = tf.random_uniform((batch_size, height, width, 3)) with slim.arg_scope([slim.conv2d, slim.separable_conv2d], normalizer_fn=slim.batch_norm): _, end_points = mobilenet_v1.mobilenet_v1_base( inputs, final_endpoint='Conv2d_13_pointwise') _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base( inputs, final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True) endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32], 'Conv2d_1_depthwise': [batch_size, 112, 112, 32], 'Conv2d_1_pointwise': [batch_size, 112, 112, 64], 'Conv2d_2_depthwise': [batch_size, 56, 56, 64], 'Conv2d_2_pointwise': [batch_size, 56, 56, 128], 'Conv2d_3_depthwise': [batch_size, 56, 56, 128], 'Conv2d_3_pointwise': [batch_size, 56, 56, 128], 'Conv2d_4_depthwise': [batch_size, 28, 28, 128], 'Conv2d_4_pointwise': [batch_size, 28, 28, 256], 'Conv2d_5_depthwise': [batch_size, 28, 28, 256], 'Conv2d_5_pointwise': [batch_size, 28, 28, 256], 'Conv2d_6_depthwise': [batch_size, 14, 14, 256], 'Conv2d_6_pointwise': [batch_size, 14, 14, 512], 'Conv2d_7_depthwise': [batch_size, 14, 14, 512], 'Conv2d_7_pointwise': [batch_size, 14, 14, 512], 'Conv2d_8_depthwise': [batch_size, 14, 14, 512], 'Conv2d_8_pointwise': [batch_size, 14, 14, 512], 'Conv2d_9_depthwise': [batch_size, 14, 14, 512], 'Conv2d_9_pointwise': [batch_size, 14, 14, 512], 'Conv2d_10_depthwise': [batch_size, 14, 14, 512], 'Conv2d_10_pointwise': [batch_size, 14, 14, 512], 'Conv2d_11_depthwise': [batch_size, 14, 14, 512], 'Conv2d_11_pointwise': [batch_size, 14, 14, 512], 'Conv2d_12_depthwise': [batch_size, 7, 7, 512], 'Conv2d_12_pointwise': [batch_size, 7, 7, 1024], 'Conv2d_13_depthwise': [batch_size, 7, 7, 1024], 'Conv2d_13_pointwise': [batch_size, 7, 7, 1024]} self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) for endpoint_name, expected_shape in endpoints_shapes.items(): self.assertTrue(endpoint_name in end_points) self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape) self.assertItemsEqual(endpoints_shapes.keys(), explicit_padding_end_points.keys()) for endpoint_name, expected_shape in 
endpoints_shapes.items(): self.assertTrue(endpoint_name in explicit_padding_end_points) self.assertListEqual( explicit_padding_end_points[endpoint_name].get_shape().as_list(), expected_shape) def testOutputStride16BuildAndCheckAllEndPointsUptoConv2d_13(self): batch_size = 5 height, width = 224, 224 output_stride = 16 inputs = tf.random_uniform((batch_size, height, width, 3)) with slim.arg_scope([slim.conv2d, slim.separable_conv2d], normalizer_fn=slim.batch_norm): _, end_points = mobilenet_v1.mobilenet_v1_base( inputs, output_stride=output_stride, final_endpoint='Conv2d_13_pointwise') _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base( inputs, output_stride=output_stride, final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True) endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32], 'Conv2d_1_depthwise': [batch_size, 112, 112, 32], 'Conv2d_1_pointwise': [batch_size, 112, 112, 64], 'Conv2d_2_depthwise': [batch_size, 56, 56, 64], 'Conv2d_2_pointwise': [batch_size, 56, 56, 128], 'Conv2d_3_depthwise': [batch_size, 56, 56, 128], 'Conv2d_3_pointwise': [batch_size, 56, 56, 128], 'Conv2d_4_depthwise': [batch_size, 28, 28, 128], 'Conv2d_4_pointwise': [batch_size, 28, 28, 256], 'Conv2d_5_depthwise': [batch_size, 28, 28, 256], 'Conv2d_5_pointwise': [batch_size, 28, 28, 256], 'Conv2d_6_depthwise': [batch_size, 14, 14, 256], 'Conv2d_6_pointwise': [batch_size, 14, 14, 512], 'Conv2d_7_depthwise': [batch_size, 14, 14, 512], 'Conv2d_7_pointwise': [batch_size, 14, 14, 512], 'Conv2d_8_depthwise': [batch_size, 14, 14, 512], 'Conv2d_8_pointwise': [batch_size, 14, 14, 512], 'Conv2d_9_depthwise': [batch_size, 14, 14, 512], 'Conv2d_9_pointwise': [batch_size, 14, 14, 512], 'Conv2d_10_depthwise': [batch_size, 14, 14, 512], 'Conv2d_10_pointwise': [batch_size, 14, 14, 512], 'Conv2d_11_depthwise': [batch_size, 14, 14, 512], 'Conv2d_11_pointwise': [batch_size, 14, 14, 512], 'Conv2d_12_depthwise': [batch_size, 14, 14, 512], 'Conv2d_12_pointwise': [batch_size, 14, 14, 1024], 'Conv2d_13_depthwise': [batch_size, 14, 14, 1024], 'Conv2d_13_pointwise': [batch_size, 14, 14, 1024]} self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) for endpoint_name, expected_shape in endpoints_shapes.items(): self.assertTrue(endpoint_name in end_points) self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape) self.assertItemsEqual(endpoints_shapes.keys(), explicit_padding_end_points.keys()) for endpoint_name, expected_shape in endpoints_shapes.items(): self.assertTrue(endpoint_name in explicit_padding_end_points) self.assertListEqual( explicit_padding_end_points[endpoint_name].get_shape().as_list(), expected_shape) def testOutputStride8BuildAndCheckAllEndPointsUptoConv2d_13(self): batch_size = 5 height, width = 224, 224 output_stride = 8 inputs = tf.random_uniform((batch_size, height, width, 3)) with slim.arg_scope([slim.conv2d, slim.separable_conv2d], normalizer_fn=slim.batch_norm): _, end_points = mobilenet_v1.mobilenet_v1_base( inputs, output_stride=output_stride, final_endpoint='Conv2d_13_pointwise') _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base( inputs, output_stride=output_stride, final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True) endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32], 'Conv2d_1_depthwise': [batch_size, 112, 112, 32], 'Conv2d_1_pointwise': [batch_size, 112, 112, 64], 'Conv2d_2_depthwise': [batch_size, 56, 56, 64], 'Conv2d_2_pointwise': [batch_size, 56, 56, 128], 'Conv2d_3_depthwise': [batch_size, 56, 56, 128], 
'Conv2d_3_pointwise': [batch_size, 56, 56, 128], 'Conv2d_4_depthwise': [batch_size, 28, 28, 128], 'Conv2d_4_pointwise': [batch_size, 28, 28, 256], 'Conv2d_5_depthwise': [batch_size, 28, 28, 256], 'Conv2d_5_pointwise': [batch_size, 28, 28, 256], 'Conv2d_6_depthwise': [batch_size, 28, 28, 256], 'Conv2d_6_pointwise': [batch_size, 28, 28, 512], 'Conv2d_7_depthwise': [batch_size, 28, 28, 512], 'Conv2d_7_pointwise': [batch_size, 28, 28, 512], 'Conv2d_8_depthwise': [batch_size, 28, 28, 512], 'Conv2d_8_pointwise': [batch_size, 28, 28, 512], 'Conv2d_9_depthwise': [batch_size, 28, 28, 512], 'Conv2d_9_pointwise': [batch_size, 28, 28, 512], 'Conv2d_10_depthwise': [batch_size, 28, 28, 512], 'Conv2d_10_pointwise': [batch_size, 28, 28, 512], 'Conv2d_11_depthwise': [batch_size, 28, 28, 512], 'Conv2d_11_pointwise': [batch_size, 28, 28, 512], 'Conv2d_12_depthwise': [batch_size, 28, 28, 512], 'Conv2d_12_pointwise': [batch_size, 28, 28, 1024], 'Conv2d_13_depthwise': [batch_size, 28, 28, 1024], 'Conv2d_13_pointwise': [batch_size, 28, 28, 1024]} self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) for endpoint_name, expected_shape in endpoints_shapes.items(): self.assertTrue(endpoint_name in end_points) self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape) self.assertItemsEqual(endpoints_shapes.keys(), explicit_padding_end_points.keys()) for endpoint_name, expected_shape in endpoints_shapes.items(): self.assertTrue(endpoint_name in explicit_padding_end_points) self.assertListEqual( explicit_padding_end_points[endpoint_name].get_shape().as_list(), expected_shape) def testBuildAndCheckAllEndPointsApproximateFaceNet(self): batch_size = 5 height, width = 128, 128 inputs = tf.random_uniform((batch_size, height, width, 3)) with slim.arg_scope([slim.conv2d, slim.separable_conv2d], normalizer_fn=slim.batch_norm): _, end_points = mobilenet_v1.mobilenet_v1_base( inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75) _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base( inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75, use_explicit_padding=True) # For the Conv2d_0 layer FaceNet has depth=16 endpoints_shapes = {'Conv2d_0': [batch_size, 64, 64, 24], 'Conv2d_1_depthwise': [batch_size, 64, 64, 24], 'Conv2d_1_pointwise': [batch_size, 64, 64, 48], 'Conv2d_2_depthwise': [batch_size, 32, 32, 48], 'Conv2d_2_pointwise': [batch_size, 32, 32, 96], 'Conv2d_3_depthwise': [batch_size, 32, 32, 96], 'Conv2d_3_pointwise': [batch_size, 32, 32, 96], 'Conv2d_4_depthwise': [batch_size, 16, 16, 96], 'Conv2d_4_pointwise': [batch_size, 16, 16, 192], 'Conv2d_5_depthwise': [batch_size, 16, 16, 192], 'Conv2d_5_pointwise': [batch_size, 16, 16, 192], 'Conv2d_6_depthwise': [batch_size, 8, 8, 192], 'Conv2d_6_pointwise': [batch_size, 8, 8, 384], 'Conv2d_7_depthwise': [batch_size, 8, 8, 384], 'Conv2d_7_pointwise': [batch_size, 8, 8, 384], 'Conv2d_8_depthwise': [batch_size, 8, 8, 384], 'Conv2d_8_pointwise': [batch_size, 8, 8, 384], 'Conv2d_9_depthwise': [batch_size, 8, 8, 384], 'Conv2d_9_pointwise': [batch_size, 8, 8, 384], 'Conv2d_10_depthwise': [batch_size, 8, 8, 384], 'Conv2d_10_pointwise': [batch_size, 8, 8, 384], 'Conv2d_11_depthwise': [batch_size, 8, 8, 384], 'Conv2d_11_pointwise': [batch_size, 8, 8, 384], 'Conv2d_12_depthwise': [batch_size, 4, 4, 384], 'Conv2d_12_pointwise': [batch_size, 4, 4, 768], 'Conv2d_13_depthwise': [batch_size, 4, 4, 768], 'Conv2d_13_pointwise': [batch_size, 4, 4, 768]} self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys()) for 
endpoint_name, expected_shape in endpoints_shapes.items(): self.assertTrue(endpoint_name in end_points) self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape) self.assertItemsEqual(endpoints_shapes.keys(), explicit_padding_end_points.keys()) for endpoint_name, expected_shape in endpoints_shapes.items(): self.assertTrue(endpoint_name in explicit_padding_end_points) self.assertListEqual( explicit_padding_end_points[endpoint_name].get_shape().as_list(), expected_shape) def testModelHasExpectedNumberOfParameters(self): batch_size = 5 height, width = 224, 224 inputs = tf.random_uniform((batch_size, height, width, 3)) with slim.arg_scope([slim.conv2d, slim.separable_conv2d], normalizer_fn=slim.batch_norm): mobilenet_v1.mobilenet_v1_base(inputs) total_params, _ = slim.model_analyzer.analyze_vars( slim.get_model_variables()) self.assertAlmostEqual(3217920, total_params) def testBuildEndPointsWithDepthMultiplierLessThanOne(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) endpoint_keys = [key for key in end_points.keys() if key.startswith('Conv')] _, end_points_with_multiplier = mobilenet_v1.mobilenet_v1( inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=0.5) for key in endpoint_keys: original_depth = end_points[key].get_shape().as_list()[3] new_depth = end_points_with_multiplier[key].get_shape().as_list()[3] self.assertEqual(0.5 * original_depth, new_depth) def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) _, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) endpoint_keys = [key for key in end_points.keys() if key.startswith('Mixed') or key.startswith('Conv')] _, end_points_with_multiplier = mobilenet_v1.mobilenet_v1( inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0) for key in endpoint_keys: original_depth = end_points[key].get_shape().as_list()[3] new_depth = end_points_with_multiplier[key].get_shape().as_list()[3] self.assertEqual(2.0 * original_depth, new_depth) def testRaiseValueErrorWithInvalidDepthMultiplier(self): batch_size = 5 height, width = 224, 224 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) with self.assertRaises(ValueError): _ = mobilenet_v1.mobilenet_v1( inputs, num_classes, depth_multiplier=-0.1) with self.assertRaises(ValueError): _ = mobilenet_v1.mobilenet_v1( inputs, num_classes, depth_multiplier=0.0) def testHalfSizeImages(self): batch_size = 5 height, width = 112, 112 num_classes = 1000 inputs = tf.random_uniform((batch_size, height, width, 3)) logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) pre_pool = end_points['Conv2d_13_pointwise'] self.assertListEqual(pre_pool.get_shape().as_list(), [batch_size, 4, 4, 1024]) def testUnknownImageShape(self): tf.reset_default_graph() batch_size = 2 height, width = 224, 224 num_classes = 1000 input_np = np.random.uniform(0, 1, (batch_size, height, width, 3)) with self.test_session() as sess: inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3)) logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits')) 
self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) pre_pool = end_points['Conv2d_13_pointwise'] feed_dict = {inputs: input_np} tf.global_variables_initializer().run() pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024]) def testGlobalPoolUnknownImageShape(self): tf.reset_default_graph() batch_size = 1 height, width = 250, 300 num_classes = 1000 input_np = np.random.uniform(0, 1, (batch_size, height, width, 3)) with self.test_session() as sess: inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3)) logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes, global_pool=True) self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits')) self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes]) pre_pool = end_points['Conv2d_13_pointwise'] feed_dict = {inputs: input_np} tf.global_variables_initializer().run() pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict) self.assertListEqual(list(pre_pool_out.shape), [batch_size, 8, 10, 1024]) def testUnknowBatchSize(self): batch_size = 1 height, width = 224, 224 num_classes = 1000 inputs = tf.placeholder(tf.float32, (None, height, width, 3)) logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes) self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits')) self.assertListEqual(logits.get_shape().as_list(), [None, num_classes]) images = tf.random_uniform((batch_size, height, width, 3)) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(logits, {inputs: images.eval()}) self.assertEquals(output.shape, (batch_size, num_classes)) def testEvaluation(self): batch_size = 2 height, width = 224, 224 num_classes = 1000 eval_inputs = tf.random_uniform((batch_size, height, width, 3)) logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes, is_training=False) predictions = tf.argmax(logits, 1) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (batch_size,)) def testTrainEvalWithReuse(self): train_batch_size = 5 eval_batch_size = 2 height, width = 150, 150 num_classes = 1000 train_inputs = tf.random_uniform((train_batch_size, height, width, 3)) mobilenet_v1.mobilenet_v1(train_inputs, num_classes) eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3)) logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes, reuse=True) predictions = tf.argmax(logits, 1) with self.test_session() as sess: sess.run(tf.global_variables_initializer()) output = sess.run(predictions) self.assertEquals(output.shape, (eval_batch_size,)) def testLogitsNotSqueezed(self): num_classes = 25 images = tf.random_uniform([1, 224, 224, 3]) logits, _ = mobilenet_v1.mobilenet_v1(images, num_classes=num_classes, spatial_squeeze=False) with self.test_session() as sess: tf.global_variables_initializer().run() logits_out = sess.run(logits) self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes]) def testBatchNormScopeDoesNotHaveIsTrainingWhenItsSetToNone(self): sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=None) self.assertNotIn('is_training', sc[slim.arg_scope_func_key( slim.batch_norm)]) def testBatchNormScopeDoesHasIsTrainingWhenItsNotNone(self): sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=True) self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet_v1.mobilenet_v1_arg_scope(is_training=False) self.assertIn('is_training', 
sc[slim.arg_scope_func_key(slim.batch_norm)]) sc = mobilenet_v1.mobilenet_v1_arg_scope() self.assertIn('is_training', sc[slim.arg_scope_func_key(slim.batch_norm)]) if __name__ == '__main__': tf.test.main()
PyTorch/LanguageModeling/BERT/triton/runner/maintainer
maintainer
maintainer_factory
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .docker.maintainer import DockerMaintainer class MaintainerFactory: @staticmethod def create_docker_maintainer(): return DockerMaintainer()
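# Usage sketch (illustrative, not part of the original module): the factory
# hides the concrete Maintainer backend from callers; today it only knows how
# to construct the Docker-based maintainer defined in .docker.maintainer.
if __name__ == "__main__":
    maintainer = MaintainerFactory.create_docker_maintainer()
    print(type(maintainer).__name__)  # -> DockerMaintainer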
PyTorch/Classification/GPUNet/triton/runner
runner
pipeline
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from typing import Dict, Tuple # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .stages import ( ConversionStage, DeployStage, ExportStage, ResultsType, TritonPerformanceOfflineStage, TritonPerformanceOnlineStage, TritonPreparePerformanceProfilingDataStage, ) class Pipeline: """ Definition of stages that has to be executed before and during experiments """ # Stages to execute as part of single experiment _experiment_stages = [ ExportStage.label, ConversionStage.label, DeployStage.label, TritonPreparePerformanceProfilingDataStage.label, TritonPerformanceOfflineStage.label, TritonPerformanceOnlineStage.label, ] def __init__(self): """ Initialize pipeline """ self._stages: Dict = dict() def model_export(self, commands: Tuple[str, ...]) -> None: """ Model export stage Args: commands: Commands to be executed as part of stage Returns: None """ stage = ExportStage(commands=commands) self._stages[stage.label] = stage def model_conversion(self, commands: Tuple[str, ...]) -> None: """ Model conversion stage Args: commands: Commands to be executed as part of stage Returns: None """ stage = ConversionStage(commands=commands) self._stages[stage.label] = stage def model_deploy(self, commands: Tuple[str, ...]) -> None: """ Model deployment stage Args: commands: Commands to be executed as part of stage Returns: None """ stage = DeployStage(commands=commands) self._stages[stage.label] = stage def triton_prepare_performance_profiling_data(self, commands: Tuple[str, ...]) -> None: """ Model profiling data creation stage Args: commands: Commands to be executed as part of stage Returns: None """ stage = TritonPreparePerformanceProfilingDataStage(commands=commands) self._stages[stage.label] = stage def triton_performance_offline_tests(self, commands: Tuple[str, ...], result_path: str) -> None: """ Model performance offline test stage Args: commands: Commands to be executed as part of stage result_path: Path where results file is stored Returns: None """ stage = TritonPerformanceOfflineStage( commands=commands, result_path=result_path, result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE, ) self._stages[stage.label] = stage def triton_performance_online_tests(self, commands: Tuple[str, ...], result_path: str) -> None: """ Model performance online test stage Args: commands: Commands to be executed as part of stage result_path: Path where results file is stored Returns: None """ stage = TritonPerformanceOnlineStage( commands=commands, result_path=result_path, result_type=ResultsType.TRITON_PERFORMANCE_ONLINE, ) self._stages[stage.label] = stage def stages(self): """ Generate stages which should be run per experiment Returns: Generator with stages object """ for stage_name in self._experiment_stages: stage = self._stages.get(stage_name) if not stage: continue yield stage
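# Usage sketch (illustrative, not part of the original module). Stages are
# registered per step and then replayed in the fixed order defined by
# Pipeline._experiment_stages; stages that were never registered are skipped.
# The command strings below are placeholders, not real runner commands.
if __name__ == "__main__":
    pipeline = Pipeline()
    pipeline.model_export(commands=("<export command>",))
    pipeline.model_conversion(commands=("<conversion command>",))
    pipeline.model_deploy(commands=("<deploy command>",))
    # Only the three registered stages are yielded, in experiment order.
    for stage in pipeline.stages():
        print(stage.label)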
TensorFlow/Translation/GNMT
GNMT
attention_wrapper
# Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """A powerful dynamic attention wrapper object.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import os import numpy as np import tensorflow as tf from utils import math_utils from utils import misc_utils from tensorflow.contrib.framework.python.framework import tensor_util from tensorflow.python.framework import function from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import rnn_cell_impl _zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access nest = tf.contrib.framework.nest class AttentionMechanism(object): @property def alignments_size(self): raise NotImplementedError @property def state_size(self): raise NotImplementedError def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined): """Convert to tensor and possibly mask `memory`. Args: memory: `Tensor`, shaped `[batch_size, max_time, ...]`. memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`. check_inner_dims_defined: Python boolean. If `True`, the `memory` argument's shape is checked to ensure all but the two outermost dimensions are fully defined. Returns: A (possibly masked), checked, new `memory`. Raises: ValueError: If `check_inner_dims_defined` is `True` and not `memory.shape[2:].is_fully_defined()`. 
""" memory = nest.map_structure( lambda m: tf.convert_to_tensor(m, name="memory"), memory) if memory_sequence_length is not None: memory_sequence_length = tf.convert_to_tensor( memory_sequence_length, name="memory_sequence_length") if check_inner_dims_defined: def _check_dims(m): if not m.get_shape()[2:].is_fully_defined(): raise ValueError("Expected memory %s to have fully defined inner dims, " "but saw shape: %s" % (m.name, m.get_shape())) nest.map_structure(_check_dims, memory) if memory_sequence_length is None: seq_len_mask = None else: seq_len_mask = tf.sequence_mask( memory_sequence_length, maxlen=tf.shape(nest.flatten(memory)[0])[1], dtype=nest.flatten(memory)[0].dtype) seq_len_batch_size = ( memory_sequence_length.shape[0].value or tf.shape(memory_sequence_length)[0]) def _maybe_mask(m, seq_len_mask): rank = m.get_shape().ndims rank = rank if rank is not None else tf.rank(m) extra_ones = tf.ones(rank - 2, dtype=tf.int32) m_batch_size = m.shape[0].value or tf.shape(m)[0] if memory_sequence_length is not None: seq_len_mask = tf.reshape( seq_len_mask, tf.concat((tf.shape(seq_len_mask), extra_ones), 0)) return m * seq_len_mask else: return m return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory) def _maybe_mask_score(score, memory_sequence_length, score_mask_value): if memory_sequence_length is None: return score if score_mask_value is None: score_mask_value = tf.as_dtype(score.dtype).as_numpy_dtype(-np.inf) score_mask = tf.sequence_mask( memory_sequence_length, maxlen=tf.shape(score)[1]) score_mask_values = score_mask_value * tf.ones_like(score) return tf.where(score_mask, score, score_mask_values) class _BaseAttentionMechanism(AttentionMechanism): """A base AttentionMechanism class providing common functionality. Common functionality includes: 1. Storing the query and memory layers. 2. Preprocessing and storing the memory. """ def __init__(self, query_layer, memory, probability_fn, memory_sequence_length=None, memory_layer=None, check_inner_dims_defined=True, score_mask_value=None, name=None): """Construct base AttentionMechanism class. Args: query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth must match the depth of `memory_layer`. If `query_layer` is not provided, the shape of `query` must match that of `memory_layer`. memory: The memory to query; usually the output of an RNN encoder. This tensor should be shaped `[batch_size, max_time, ...]`. probability_fn: A `callable`. Converts the score and previous alignments to probabilities. Its signature should be: `probabilities = probability_fn(score, state)`. memory_sequence_length (optional): Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's depth must match the depth of `query_layer`. If `memory_layer` is not provided, the shape of `memory` must match that of `query_layer`. check_inner_dims_defined: Python boolean. If `True`, the `memory` argument's shape is checked to ensure all but the two outermost dimensions are fully defined. score_mask_value: (optional): The mask value for score before passing into `probability_fn`. The default is -inf. Only used if `memory_sequence_length` is not None. name: Name to use when creating ops. 
""" if (query_layer is not None and not isinstance(query_layer, tf.layers.Layer)): raise TypeError( "query_layer is not a Layer: %s" % type(query_layer).__name__) if (memory_layer is not None and not isinstance(memory_layer, tf.layers.Layer)): raise TypeError( "memory_layer is not a Layer: %s" % type(memory_layer).__name__) self._query_layer = query_layer self._memory_layer = memory_layer self.dtype = memory_layer.dtype if not callable(probability_fn): raise TypeError("probability_fn must be callable, saw type: %s" % type(probability_fn).__name__) self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda probability_fn( _maybe_mask_score(score, memory_sequence_length, score_mask_value), prev)) with tf.name_scope( name, "BaseAttentionMechanismInit", nest.flatten(memory)): self._values = _prepare_memory( memory, memory_sequence_length, check_inner_dims_defined=check_inner_dims_defined) self._keys = ( self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable else self._values) self._batch_size = ( self._keys.shape[0].value or tf.shape(self._keys)[0]) self._alignments_size = (self._keys.shape[1].value or tf.shape(self._keys)[1]) def build(self, query_shape): self._query_layer.build((None, query_shape[-1])) # memory_layer is built in the constructor. self.built = True @property def memory_layer(self): return self._memory_layer @property def query_layer(self): return self._query_layer @property def values(self): return self._values @property def keys(self): return self._keys @property def batch_size(self): return self._batch_size @property def alignments_size(self): return self._alignments_size @property def state_size(self): return self._alignments_size def initial_alignments(self, batch_size, dtype): """Creates the initial alignment values for the `AttentionWrapper` class. This is important for AttentionMechanisms that use the previous alignment to calculate the alignment at the next time step (e.g. monotonic attention). The default behavior is to return a tensor of all zeros. Args: batch_size: `int32` scalar, the batch_size. dtype: The `dtype`. Returns: A `dtype` tensor shaped `[batch_size, alignments_size]` (`alignments_size` is the values' `max_time`). """ max_time = self._alignments_size return _zero_state_tensors(max_time, batch_size, dtype) def initial_state(self, batch_size, dtype): """Creates the initial state values for the `AttentionWrapper` class. This is important for AttentionMechanisms that use the previous alignment to calculate the alignment at the next time step (e.g. monotonic attention). The default behavior is to return the same output as initial_alignments. Args: batch_size: `int32` scalar, the batch_size. dtype: The `dtype`. Returns: A structure of all-zero tensors with shapes as described by `state_size`. """ return self.initial_alignments(batch_size, dtype) def _bahdanau_score(processed_query, keys, normalize, v, g, b): """Implements Bahdanau-style (additive) scoring function. This attention has two forms. The first is Bhandanau attention, as described in: Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio. "Neural Machine Translation by Jointly Learning to Align and Translate." ICLR 2015. https://arxiv.org/abs/1409.0473 The second is the normalized form. This form is inspired by the weight normalization article: Tim Salimans, Diederik P. Kingma. "Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks." https://arxiv.org/abs/1602.07868 To enable the second form, set `normalize=True`. 
Args: processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys. keys: Processed memory, shape `[batch_size, max_time, num_units]`. normalize: Whether to normalize the score function. Returns: A `[batch_size, max_time]` tensor of unnormalized score values. """ # Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting. processed_query = tf.expand_dims(processed_query, 1) if normalize: # normed_v = g * v / ||v|| def NormalizedAttenFwd(keys, processed_query, g, v, b): """normalized atten.""" normed_v = g * v * tf.rsqrt( tf.reduce_sum(tf.square(v))) batch = tf.shape(keys)[0] max_time = tf.shape(keys)[1] units = tf.shape(keys)[-1] # [batch, time, dim] activation = tf.tanh(keys + processed_query + b) # [batch * time, dim] activation = tf.reshape(activation, [batch * max_time, units]) # [dim, 1] v = tf.expand_dims(normed_v, -1) # [batch * time, 1] -> [batch * time] y = tf.squeeze(tf.matmul(activation, v), axis=1) y = tf.reshape(y, [batch, max_time]) return y use_xla = os.environ["use_xla"] == "true" def NormalizedAtten(keys, processed_query, g, v, b): return NormalizedAttenFwd(keys, processed_query, g, v, b) fn = NormalizedAtten if os.environ["use_defun"] == "true": fn = function.Defun(compiled=use_xla)(fn) res = fn(keys, processed_query, g, v, b) res.set_shape((None, keys.shape[1])) return res else: def _Atten(keys, processed_query, v): """atten.""" batch = tf.shape(keys)[0] max_time = tf.shape(keys)[1] units = tf.shape(keys)[-1] activation = tf.tanh(keys + processed_query) activation = tf.reshape(activation, [batch * max_time, units]) v = tf.expand_dims(v, -1) y = tf.squeeze(tf.matmul(activation, v), axis=1) y = tf.reshape(y, [batch, max_time]) return y fn = _Atten if os.environ["use_defun"] == "true": fn = function.Defun()(fn) return fn(keys, processed_query, v) class BahdanauAttention(_BaseAttentionMechanism): """Implements Bahdanau-style (additive) attention. This attention has two forms. The first is Bahdanau attention, as described in: Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio. "Neural Machine Translation by Jointly Learning to Align and Translate." ICLR 2015. https://arxiv.org/abs/1409.0473 The second is the normalized form. This form is inspired by the weight normalization article: Tim Salimans, Diederik P. Kingma. "Weight Normalization: A Simple Reparameterization to Accelerate Training of Deep Neural Networks." https://arxiv.org/abs/1602.07868 To enable the second form, construct the object with parameter `normalize=True`. """ def __init__(self, num_units, memory, memory_sequence_length=None, normalize=False, probability_fn=None, score_mask_value=None, dtype=None, name="BahdanauAttention"): """Construct the Attention mechanism. Args: num_units: The depth of the query mechanism. memory: The memory to query; usually the output of an RNN encoder. This tensor should be shaped `[batch_size, max_time, ...]`. memory_sequence_length (optional): Sequence lengths for the batch entries in memory. If provided, the memory tensor rows are masked with zeros for values past the respective sequence lengths. normalize: Python boolean. Whether to normalize the energy term. probability_fn: (optional) A `callable`. Converts the score to probabilities. The default is `tf.nn.softmax`. Other options include `tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`. Its signature should be: `probabilities = probability_fn(score)`. score_mask_value: (optional): The mask value for score before passing into `probability_fn`. The default is -inf. 
Only used if `memory_sequence_length` is not None. dtype: The data type for the query and memory layers of the attention mechanism. name: Name to use when creating ops. """ if probability_fn is None: probability_fn = tf.nn.softmax if dtype is None: dtype = tf.float32 wrapped_probability_fn = lambda score, _: probability_fn(score) super(BahdanauAttention, self).__init__( query_layer=tf.layers.Dense( num_units, name="query_layer", use_bias=False, dtype=dtype), memory_layer=tf.layers.Dense( num_units, name="memory_layer", use_bias=False, dtype=dtype), memory=memory, probability_fn=wrapped_probability_fn, memory_sequence_length=memory_sequence_length, score_mask_value=score_mask_value, name=name) self._num_units = num_units self._normalize = normalize self._name = name self.v = tf.get_variable("attention_v", [num_units], dtype=dtype) # TODO(jamesqin): I have no idea why the following is happening. # If not doing read_value(), later in backprop it would complain about # AddN op is fed with "resource" dtype input, which shouldn't be happening. if isinstance(self.v, resource_variable_ops.ResourceVariable): self.v = self.v.read_value() self.g, self.b = None, None if self._normalize: # Scalar used in weight normalization self.g = tf.get_variable( "attention_g", dtype=dtype, initializer=tf.constant_initializer(math.sqrt((1. / num_units))), shape=()) # Same as above self.g if isinstance(self.g, resource_variable_ops.ResourceVariable): self.g = self.g.read_value() # Bias added prior to the nonlinearity self.b = tf.get_variable( "attention_b", [num_units], dtype=dtype, initializer=tf.zeros_initializer()) # Same as above self.g if isinstance(self.b, resource_variable_ops.ResourceVariable): self.b = self.b.read_value() def __call__(self, query, state): """Score the query based on the keys and values. Args: query: Tensor of dtype matching `self.values` and shape `[batch_size, query_depth]`. state: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). Returns: alignments: Tensor of dtype matching `self.values` and shape `[batch_size, alignments_size]` (`alignments_size` is memory's `max_time`). """ query_dim = query.shape[-1].value assert query_dim def _compute_alignments(query, state): with tf.variable_scope(None, "bahdanau_attention", [query]): # TODO(jamesqin): figure out the shape implications of Defun. query.set_shape((None, query_dim)) processed_query = self.query_layer(query) if self.query_layer else query score = _bahdanau_score(processed_query, self._keys, self._normalize, self.v, self.g, self.b) alignments = self._probability_fn(score, state) next_state = alignments return alignments, next_state return _compute_alignments(query, state) class AttentionWrapperState( collections.namedtuple("AttentionWrapperState", ("cell_state", "attention", "time", "alignments", "alignment_history", "attention_state"))): """`namedtuple` storing the state of a `AttentionWrapper`. Contains: - `cell_state`: The state of the wrapped `RNNCell` at the previous time step. - `attention`: The attention emitted at the previous time step. - `time`: int32 scalar containing the current time step. - `alignments`: A single or tuple of `Tensor`(s) containing the alignments emitted at the previous time step for each attention mechanism. - `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s) containing alignment matrices from all time steps for each attention mechanism. Call `stack()` on each to convert to a `Tensor`. 
- `attention_state`: A single or tuple of nested objects containing attention mechanism state for each attention mechanism. The objects may contain Tensors or TensorArrays. """ def clone(self, **kwargs): """Clone this object, overriding components provided by kwargs. The new state fields' shape must match original state fields' shape. This will be validated, and original fields' shape will be propagated to new fields. Example: ```python initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...) initial_state = initial_state.clone(cell_state=encoder_state) ``` Args: **kwargs: Any properties of the state object to replace in the returned `AttentionWrapperState`. Returns: A new `AttentionWrapperState` whose properties are the same as this one, except any overridden properties as provided in `kwargs`. """ def with_same_shape(old, new): """Check and set new tensor's shape.""" xla_compile = (os.environ["xla_compile"] == "true") if not xla_compile: if isinstance(old, tf.Tensor) and isinstance(new, tf.Tensor): return tensor_util.with_same_shape(old, new) return new return nest.map_structure( with_same_shape, self, super(AttentionWrapperState, self)._replace(**kwargs)) def _compute_attention(attention_mechanism, cell_output, attention_state, attention_layer): """Computes the attention and alignments for a given attention_mechanism.""" alignments, next_attention_state = attention_mechanism( cell_output, state=attention_state) # Context is the inner product of alignments and values along the # memory time dimension. # alignments shape is # [batch_size, 1, memory_time] # attention_mechanism.values shape is # [batch_size, memory_time, memory_size] # the batched matmul is over memory_time, so the output shape is # [batch_size, 1, memory_size]. # we then squeeze out the singleton dim. expanded_alignments = tf.expand_dims(alignments, 1) context = math_utils.BatchMatMul(expanded_alignments, attention_mechanism.values) context = tf.squeeze(context, [1]) if attention_layer is not None: attention = attention_layer(tf.concat([cell_output, context], 1)) else: attention = context return attention, alignments, next_attention_state class AttentionWrapper(tf.nn.rnn_cell.RNNCell): """Wraps another `RNNCell` with attention. """ def __init__(self, cell, attention_mechanism, attention_layer_size=None, alignment_history=False, cell_input_fn=None, output_attention=True, initial_cell_state=None, name=None, attention_layer=None): """Construct the `AttentionWrapper`. **NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in `AttentionWrapper`, then you must ensure that: - The encoder output has been tiled to `beam_width` via `tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`). - The `batch_size` argument passed to the `zero_state` method of this wrapper is equal to `true_batch_size * beam_width`. - The initial state created with `zero_state` above contains a `cell_state` value containing properly tiled final state from the encoder. An example: ``` tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch( encoder_outputs, multiplier=beam_width) tiled_encoder_final_state = tf.conrib.seq2seq.tile_batch( encoder_final_state, multiplier=beam_width) tiled_sequence_length = tf.contrib.seq2seq.tile_batch( sequence_length, multiplier=beam_width) attention_mechanism = MyFavoriteAttentionMechanism( num_units=attention_depth, memory=tiled_inputs, memory_sequence_length=tiled_sequence_length) attention_cell = AttentionWrapper(cell, attention_mechanism, ...) 
decoder_initial_state = attention_cell.zero_state( dtype, batch_size=true_batch_size * beam_width) decoder_initial_state = decoder_initial_state.clone( cell_state=tiled_encoder_final_state) ``` Args: cell: An instance of `RNNCell`. attention_mechanism: A list of `AttentionMechanism` instances or a single instance. attention_layer_size: A list of Python integers or a single Python integer, the depth of the attention (output) layer(s). If None (default), use the context as attention at each time step. Otherwise, feed the context and cell output into the attention layer to generate attention at each time step. If attention_mechanism is a list, attention_layer_size must be a list of the same length. If attention_layer is set, this must be None. alignment_history: Python boolean, whether to store alignment history from all time steps in the final output state (currently stored as a time major `TensorArray` on which you must call `stack()`). cell_input_fn: (optional) A `callable`. The default is: `lambda inputs, attention: tf.concat([inputs, attention], -1)`. output_attention: Python bool. If `True` (default), the output at each time step is the attention value. This is the behavior of Luong-style attention mechanisms. If `False`, the output at each time step is the output of `cell`. This is the behavior of Bhadanau-style attention mechanisms. In both cases, the `attention` tensor is propagated to the next time step via the state and is used there. This flag only controls whether the attention mechanism is propagated up to the next cell in an RNN stack or to the top RNN output. initial_cell_state: The initial state value to use for the cell when the user calls `zero_state()`. Note that if this value is provided now, and the user uses a `batch_size` argument of `zero_state` which does not match the batch size of `initial_cell_state`, proper behavior is not guaranteed. name: Name to use when creating ops. attention_layer: A list of `tf.layers.Layer` instances or a single `tf.layers.Layer` instance taking the context and cell output as inputs to generate attention at each time step. If None (default), use the context as attention at each time step. If attention_mechanism is a list, attention_layer must be a list of the same length. If attention_layers_size is set, this must be None. Raises: TypeError: `attention_layer_size` is not None and (`attention_mechanism` is a list but `attention_layer_size` is not; or vice versa). ValueError: if `attention_layer_size` is not None, `attention_mechanism` is a list, and its length does not match that of `attention_layer_size`; if `attention_layer_size` and `attention_layer` are set simultaneously. 
""" super(AttentionWrapper, self).__init__(name=name) rnn_cell_impl.assert_like_rnncell("cell", cell) if isinstance(attention_mechanism, (list, tuple)): self._is_multi = True attention_mechanisms = attention_mechanism for attention_mechanism in attention_mechanisms: if not isinstance(attention_mechanism, AttentionMechanism): raise TypeError( "attention_mechanism must contain only instances of " "AttentionMechanism, saw type: %s" % type(attention_mechanism).__name__) else: self._is_multi = False if not isinstance(attention_mechanism, AttentionMechanism): raise TypeError( "attention_mechanism must be an AttentionMechanism or list of " "multiple AttentionMechanism instances, saw type: %s" % type(attention_mechanism).__name__) attention_mechanisms = (attention_mechanism,) if cell_input_fn is None: cell_input_fn = ( lambda inputs, attention: tf.concat([inputs, attention], -1)) else: if not callable(cell_input_fn): raise TypeError( "cell_input_fn must be callable, saw type: %s" % type(cell_input_fn).__name__) if attention_layer_size is not None and attention_layer is not None: raise ValueError("Only one of attention_layer_size and attention_layer " "should be set") if attention_layer_size is not None: attention_layer_sizes = tuple( attention_layer_size if isinstance(attention_layer_size, (list, tuple)) else (attention_layer_size,)) if len(attention_layer_sizes) != len(attention_mechanisms): raise ValueError( "If provided, attention_layer_size must contain exactly one " "integer per attention_mechanism, saw: %d vs %d" % (len(attention_layer_sizes), len(attention_mechanisms))) self._attention_layers = tuple( tf.layers.Dense( attention_layer_size, name="attention_layer", use_bias=False, dtype=attention_mechanisms[i].dtype) for i, attention_layer_size in enumerate(attention_layer_sizes)) self._attention_layer_size = sum(attention_layer_sizes) elif attention_layer is not None: self._attention_layers = tuple( attention_layer if isinstance(attention_layer, (list, tuple)) else (attention_layer,)) if len(self._attention_layers) != len(attention_mechanisms): raise ValueError( "If provided, attention_layer must contain exactly one " "layer per attention_mechanism, saw: %d vs %d" % (len(self._attention_layers), len(attention_mechanisms))) self._attention_layer_size = sum( layer.compute_output_shape( [None, cell.output_size + mechanism.values.shape[-1].value])[-1].value for layer, mechanism in zip( self._attention_layers, attention_mechanisms)) else: self._attention_layers = None self._attention_layer_size = sum( attention_mechanism.values.get_shape()[-1].value for attention_mechanism in attention_mechanisms) self._cell = cell self._attention_mechanisms = attention_mechanisms self._cell_input_fn = cell_input_fn self._output_attention = output_attention self._alignment_history = alignment_history with tf.name_scope(name, "AttentionWrapperInit"): if initial_cell_state is None: self._initial_cell_state = None else: final_state_tensor = nest.flatten(initial_cell_state)[-1] state_batch_size = ( final_state_tensor.shape[0].value or tf.shape(final_state_tensor)[0]) error_message = ( "When constructing AttentionWrapper %s: " % self._base_name + "Non-matching batch sizes between the memory " "(encoder output) and initial_cell_state. Are you using " "the BeamSearchDecoder? 
You may need to tile your initial state " "via the tf.contrib.seq2seq.tile_batch function with argument " "multiple=beam_width.") with tf.control_dependencies( self._batch_size_checks(state_batch_size, error_message)): self._initial_cell_state = nest.map_structure( lambda s: tf.identity(s, name="check_initial_cell_state"), initial_cell_state) def force_build(self, cell_input_shape, query_shape): def _build_cell(cell, input_shape): if isinstance(cell, tf.nn.rnn_cell.DropoutWrapper): _build_cell(cell._cell, input_shape) elif isinstance(cell, tf.nn.rnn_cell.ResidualWrapper): _build_cell(cell._cell, input_shape) else: cell.build(input_shape) _build_cell(self._cell, (cell_input_shape[-1] + self._attention_layer_size)) for am in self._attention_mechanisms: am.build(query_shape) self.built = True def _batch_size_checks(self, batch_size, error_message): return [] def _item_or_tuple(self, seq): """Returns `seq` as tuple or the singular element. Which is returned is determined by how the AttentionMechanism(s) were passed to the constructor. Args: seq: A non-empty sequence of items or generator. Returns: Either the values in the sequence as a tuple if AttentionMechanism(s) were passed to the constructor as a sequence or the singular element. """ t = tuple(seq) if self._is_multi: return t else: return t[0] @property def output_size(self): if self._output_attention: return self._attention_layer_size else: return self._cell.output_size @property def state_size(self): """The `state_size` property of `AttentionWrapper`. Returns: An `AttentionWrapperState` tuple containing shapes used by this object. """ return AttentionWrapperState( cell_state=self._cell.state_size, time=tf.TensorShape([]), attention=self._attention_layer_size, alignments=self._item_or_tuple( a.alignments_size for a in self._attention_mechanisms), attention_state=self._item_or_tuple( a.state_size for a in self._attention_mechanisms), alignment_history=self._item_or_tuple( a.alignments_size if self._alignment_history else () for a in self._attention_mechanisms)) # sometimes a TensorArray def zero_state(self, batch_size, dtype): """Return an initial (zero) state tuple for this `AttentionWrapper`. **NOTE** Please see the initializer documentation for details of how to call `zero_state` if using an `AttentionWrapper` with a `BeamSearchDecoder`. Args: batch_size: `0D` integer tensor: the batch size. dtype: The internal state data type. Returns: An `AttentionWrapperState` tuple containing zeroed out tensors and, possibly, empty `TensorArray` objects. Raises: ValueError: (or, possibly at runtime, InvalidArgument), if `batch_size` does not match the output size of the encoder passed to the wrapper object at initialization time. """ with tf.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]): if self._initial_cell_state is not None: cell_state = self._initial_cell_state else: cell_state = self._cell.zero_state(batch_size, dtype) error_message = ( "When calling zero_state of AttentionWrapper %s: " % self._base_name + "Non-matching batch sizes between the memory " "(encoder output) and the requested batch size. Are you using " "the BeamSearchDecoder? 
If so, make sure your encoder output has " "been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and " "the batch_size= argument passed to zero_state is " "batch_size * beam_width.") with tf.control_dependencies( self._batch_size_checks(batch_size, error_message)): cell_state = nest.map_structure( lambda s: tf.identity(s, name="checked_cell_state"), cell_state) initial_alignments = [ attention_mechanism.initial_alignments(batch_size, dtype) for attention_mechanism in self._attention_mechanisms] return AttentionWrapperState( cell_state=cell_state, time=tf.zeros([], dtype=tf.int64), attention=_zero_state_tensors(self._attention_layer_size, batch_size, dtype), alignments=self._item_or_tuple(initial_alignments), attention_state=self._item_or_tuple( attention_mechanism.initial_state(batch_size, dtype) for attention_mechanism in self._attention_mechanisms), alignment_history=self._item_or_tuple( tf.TensorArray( dtype, size=0, dynamic_size=True, element_shape=alignment.shape) if self._alignment_history else () for alignment in initial_alignments)) def call(self, inputs, state): """Perform a step of attention-wrapped RNN. - Step 1: Mix the `inputs` and previous step's `attention` output via `cell_input_fn`. - Step 2: Call the wrapped `cell` with this input and its previous state. - Step 3: Score the cell's output with `attention_mechanism`. - Step 4: Calculate the alignments by passing the score through the `normalizer`. - Step 5: Calculate the context vector as the inner product between the alignments and the attention_mechanism's values (memory). - Step 6: Calculate the attention output by concatenating the cell output and context through the attention layer (a linear layer with `attention_layer_size` outputs). Args: inputs: (Possibly nested tuple of) Tensor, the input at this time step. state: An instance of `AttentionWrapperState` containing tensors from the previous time step. Returns: A tuple `(attention_or_cell_output, next_state)`, where: - `attention_or_cell_output` depending on `output_attention`. - `next_state` is an instance of `AttentionWrapperState` containing the state calculated at this time step. Raises: TypeError: If `state` is not an instance of `AttentionWrapperState`. """ if not isinstance(state, AttentionWrapperState): raise TypeError("Expected state to be instance of AttentionWrapperState. " "Received type %s instead." % type(state)) # Step 1: Calculate the true inputs to the cell based on the # previous attention value. cell_inputs = self._cell_input_fn(inputs, state.attention) cell_state = state.cell_state cell_output, next_cell_state = self._cell(cell_inputs, cell_state) cell_batch_size = ( cell_output.shape[0].value or tf.shape(cell_output)[0]) error_message = ( "When applying AttentionWrapper %s: " % self.name + "Non-matching batch sizes between the memory " "(encoder output) and the query (decoder output). Are you using " "the BeamSearchDecoder? 
You may need to tile your memory input via " "the tf.contrib.seq2seq.tile_batch function with argument " "multiple=beam_width.") with tf.control_dependencies( self._batch_size_checks(cell_batch_size, error_message)): cell_output = tf.identity( cell_output, name="checked_cell_output") if self._is_multi: previous_attention_state = state.attention_state previous_alignment_history = state.alignment_history else: previous_attention_state = [state.attention_state] previous_alignment_history = [state.alignment_history] all_alignments = [] all_attentions = [] all_attention_states = [] maybe_all_histories = [] for i, attention_mechanism in enumerate(self._attention_mechanisms): attention, alignments, next_attention_state = _compute_attention( attention_mechanism, cell_output, previous_attention_state[i], self._attention_layers[i] if self._attention_layers else None) alignment_history = previous_alignment_history[i].write( state.time, alignments) if self._alignment_history else () all_attention_states.append(next_attention_state) all_alignments.append(alignments) all_attentions.append(attention) maybe_all_histories.append(alignment_history) attention = tf.concat(all_attentions, 1) next_state = AttentionWrapperState( time=state.time + 1, cell_state=next_cell_state, attention=attention, attention_state=self._item_or_tuple(all_attention_states), alignments=self._item_or_tuple(all_alignments), alignment_history=self._item_or_tuple(maybe_all_histories)) if self._output_attention: return attention, next_state else: return cell_output, next_state class BahdanauAttentionFusedLayer(object): """Fused attention layer using Bahdanau attention. Only used during training. """ def __init__(self, num_units, memory, memory_sequence_length=None, dtype=None, name="BahdanauAttention"): self.v = tf.get_variable("attention_v", [num_units], dtype=dtype) # TODO(jamesqin): I have no idea why the following is happening. # If not doing read_value(), later in backprop it would complain about # AddN op is fed with "resource" dtype input, which shouldn't be happening. if isinstance(self.v, resource_variable_ops.ResourceVariable): self.v = self.v.read_value() # Scalar used in weight normalization self.g = tf.get_variable( "attention_g", dtype=dtype, initializer=tf.constant_initializer(math.sqrt((1. 
/ num_units))), shape=()) if isinstance(self.g, resource_variable_ops.ResourceVariable): self.g = self.g.read_value() # Bias added prior to the nonlinearity self.b = tf.get_variable( "attention_b", [num_units], dtype=dtype, initializer=tf.zeros_initializer()) if isinstance(self.b, resource_variable_ops.ResourceVariable): self.b = self.b.read_value() self.query_layer = tf.layers.Dense( num_units, name="query_layer", use_bias=False, dtype=dtype) self.memory_layer = tf.layers.Dense( num_units, name="memory_layer", use_bias=False, dtype=dtype) def _mask_score(score, memory_sequence_length): # score is [batch, query_max_t, memory_t] if memory_sequence_length is None: return score # [batch, 1] memory_sequence_length = tf.expand_dims(memory_sequence_length, -1) query_max_t = tf.shape(score)[1] # [batch, query_t] memory_sequence_length = tf.broadcast_to( memory_sequence_length, [tf.shape(memory_sequence_length)[0], query_max_t]) # [batch, query_t, memory_t] score_mask = tf.sequence_mask( memory_sequence_length, maxlen=tf.shape(score)[-1]) score_mask_value = tf.as_dtype(score.dtype).as_numpy_dtype(-np.inf) score_mask_values = score_mask_value * tf.ones_like(score) return tf.where(score_mask, score, score_mask_values) # score: [batch, query_max_t, memory_t] # memory_seq_length: [batch] self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda tf.nn.softmax(_mask_score(score, memory_sequence_length))) with tf.name_scope(name, "BaseAttentionMechanismInit", [memory]): # Mask padded memory. # [batch, memory_t, units] self.values = _prepare_memory( memory, memory_sequence_length, check_inner_dims_defined=True) # [batch, memory_t, units] self.keys = self.memory_layer(self.values) def __call__(self, queries): """Invoke the layer. Args: queries: [batch, queryt_t, query_size] Returns: attention: [batch, query_t, num_units] """ # Compute score for all queries, returns [batch, query_t, memory_t]. # The var scope naming is chosen to build inference graph correctly. with tf.variable_scope( "multi_rnn_cell/cell_0_attention/attention/bahdanau_attention", [queries]): # [batch, query_t, query_size] -> [batch, query_t, num_units] processed_queries = self.query_layer(queries) # [batch, memory_t, num_units] -> [batch, 1, memory_t, num_units] keys = tf.expand_dims(self.keys, axis=1) # [batch, query_t, num_units] -> [batch, query_t, 1, num_units] processed_queries = tf.expand_dims(processed_queries, axis=2) # [batch, 1, memory_t, units] + [batch, query_t, 1, units] + [units] -> # [batch, query_t, memory_t, units] activation = tf.tanh(keys + processed_queries + self.b) # [units, 1] normed_v = self.g * self.v * tf.rsqrt(tf.reduce_sum(tf.square(self.v))) v = tf.expand_dims(normed_v, -1) # [batch, query_t, memory_t, units] * [units, 1] -> # [batch, query_t, memory_t, 1] # [batch, query_t, memory_t, 1] --(squeeze)--> [batch, query_t, memory_t] score = tf.squeeze(tf.tensordot(activation, v, axes=1), axis=-1) # Compute alignment # bahdanau attention doesn't use the attention state in prob func (softmax) unused_state = None # [batch, query_t, memory_t] alignments = self._probability_fn(score, unused_state) # Note: slow batched matmul in fp16 # [batch, query_t, memory_t] * [ batch, memory_t, units] -> # [batch, query_t, units] attention = math_utils.BatchMatMul(alignments, self.values) return attention
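The shape comments in `BahdanauAttentionFusedLayer.__call__` trace the score/softmax/context sequence step by step. As a cross-check, here is a minimal NumPy sketch of the same computation; the shapes and variable names are illustrative only and are not taken from the file above.

```python
import numpy as np

# Illustrative shapes only: batch=2, query_t=3, memory_t=5, units=4.
batch, query_t, memory_t, units = 2, 3, 5, 4
keys = np.random.randn(batch, 1, memory_t, units)       # processed memory
queries = np.random.randn(batch, query_t, 1, units)     # processed queries
b = np.zeros(units)                                     # attention bias
v = np.random.randn(units)                              # attention vector
values = np.random.randn(batch, memory_t, units)        # raw memory

# Broadcast to [batch, query_t, memory_t, units], then project onto v.
activation = np.tanh(keys + queries + b)
score = np.einsum("bqmu,u->bqm", activation, v)          # [batch, query_t, memory_t]

# Softmax over the memory-time axis gives the alignments.
alignments = np.exp(score - score.max(-1, keepdims=True))
alignments /= alignments.sum(-1, keepdims=True)

# Context = batched matmul of alignments with the memory values.
context = np.einsum("bqm,bmu->bqu", alignments, values)  # [batch, query_t, units]
print(context.shape)  # (2, 3, 4)
```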
TensorFlow/Detection/SSD/models/research/slim/nets/nasnet
nasnet
pnasnet
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains the definition for the PNASNet classification networks. Paper: https://arxiv.org/abs/1712.00559 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import tensorflow as tf from nets.nasnet import nasnet from nets.nasnet import nasnet_utils arg_scope = tf.contrib.framework.arg_scope slim = tf.contrib.slim def large_imagenet_config(): """Large ImageNet configuration based on PNASNet-5.""" return tf.contrib.training.HParams( stem_multiplier=3.0, dense_dropout_keep_prob=0.5, num_cells=12, filter_scaling_rate=2.0, num_conv_filters=216, drop_path_keep_prob=0.6, use_aux_head=1, num_reduction_layers=2, data_format='NHWC', skip_reduction_layer_input=1, total_training_steps=250000, use_bounded_activation=False, ) def mobile_imagenet_config(): """Mobile ImageNet configuration based on PNASNet-5.""" return tf.contrib.training.HParams( stem_multiplier=1.0, dense_dropout_keep_prob=0.5, num_cells=9, filter_scaling_rate=2.0, num_conv_filters=54, drop_path_keep_prob=1.0, use_aux_head=1, num_reduction_layers=2, data_format='NHWC', skip_reduction_layer_input=1, total_training_steps=250000, use_bounded_activation=False, ) def pnasnet_large_arg_scope(weight_decay=4e-5, batch_norm_decay=0.9997, batch_norm_epsilon=0.001): """Default arg scope for the PNASNet Large ImageNet model.""" return nasnet.nasnet_large_arg_scope( weight_decay, batch_norm_decay, batch_norm_epsilon) def pnasnet_mobile_arg_scope(weight_decay=4e-5, batch_norm_decay=0.9997, batch_norm_epsilon=0.001): """Default arg scope for the PNASNet Mobile ImageNet model.""" return nasnet.nasnet_mobile_arg_scope(weight_decay, batch_norm_decay, batch_norm_epsilon) def _build_pnasnet_base(images, normal_cell, num_classes, hparams, is_training, final_endpoint=None): """Constructs a PNASNet image model.""" end_points = {} def add_and_check_endpoint(endpoint_name, net): end_points[endpoint_name] = net return final_endpoint and (endpoint_name == final_endpoint) # Find where to place the reduction cells or stride normal cells reduction_indices = nasnet_utils.calc_reduction_layers( hparams.num_cells, hparams.num_reduction_layers) # pylint: disable=protected-access stem = lambda: nasnet._imagenet_stem(images, hparams, normal_cell) # pylint: enable=protected-access net, cell_outputs = stem() if add_and_check_endpoint('Stem', net): return net, end_points # Setup for building in the auxiliary head. 
aux_head_cell_idxes = [] if len(reduction_indices) >= 2: aux_head_cell_idxes.append(reduction_indices[1] - 1) # Run the cells filter_scaling = 1.0 # true_cell_num accounts for the stem cells true_cell_num = 2 activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu for cell_num in range(hparams.num_cells): is_reduction = cell_num in reduction_indices stride = 2 if is_reduction else 1 if is_reduction: filter_scaling *= hparams.filter_scaling_rate if hparams.skip_reduction_layer_input or not is_reduction: prev_layer = cell_outputs[-2] net = normal_cell( net, scope='cell_{}'.format(cell_num), filter_scaling=filter_scaling, stride=stride, prev_layer=prev_layer, cell_num=true_cell_num) if add_and_check_endpoint('Cell_{}'.format(cell_num), net): return net, end_points true_cell_num += 1 cell_outputs.append(net) if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and num_classes and is_training): aux_net = activation_fn(net) # pylint: disable=protected-access nasnet._build_aux_head(aux_net, end_points, num_classes, hparams, scope='aux_{}'.format(cell_num)) # pylint: enable=protected-access # Final softmax layer with tf.variable_scope('final_layer'): net = activation_fn(net) net = nasnet_utils.global_avg_pool(net) if add_and_check_endpoint('global_pool', net) or not num_classes: return net, end_points net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout') logits = slim.fully_connected(net, num_classes) if add_and_check_endpoint('Logits', logits): return net, end_points predictions = tf.nn.softmax(logits, name='predictions') if add_and_check_endpoint('Predictions', predictions): return net, end_points return logits, end_points def build_pnasnet_large(images, num_classes, is_training=True, final_endpoint=None, config=None): """Build PNASNet Large model for the ImageNet Dataset.""" hparams = copy.deepcopy(config) if config else large_imagenet_config() # pylint: disable=protected-access nasnet._update_hparams(hparams, is_training) # pylint: enable=protected-access if tf.test.is_gpu_available() and hparams.data_format == 'NHWC': tf.logging.info('A GPU is available on the machine, consider using NCHW ' 'data format for increased speed on GPU.') if hparams.data_format == 'NCHW': images = tf.transpose(images, [0, 3, 1, 2]) # Calculate the total number of cells in the network. # There is no distinction between reduction and normal cells in PNAS so the # total number of cells is equal to the number normal cells plus the number # of stem cells (two by default). 
total_num_cells = hparams.num_cells + 2 normal_cell = PNasNetNormalCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps, hparams.use_bounded_activation) with arg_scope( [slim.dropout, nasnet_utils.drop_path, slim.batch_norm], is_training=is_training): with arg_scope([slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim], data_format=hparams.data_format): return _build_pnasnet_base( images, normal_cell=normal_cell, num_classes=num_classes, hparams=hparams, is_training=is_training, final_endpoint=final_endpoint) build_pnasnet_large.default_image_size = 331 def build_pnasnet_mobile(images, num_classes, is_training=True, final_endpoint=None, config=None): """Build PNASNet Mobile model for the ImageNet Dataset.""" hparams = copy.deepcopy(config) if config else mobile_imagenet_config() # pylint: disable=protected-access nasnet._update_hparams(hparams, is_training) # pylint: enable=protected-access if tf.test.is_gpu_available() and hparams.data_format == 'NHWC': tf.logging.info('A GPU is available on the machine, consider using NCHW ' 'data format for increased speed on GPU.') if hparams.data_format == 'NCHW': images = tf.transpose(images, [0, 3, 1, 2]) # Calculate the total number of cells in the network. # There is no distinction between reduction and normal cells in PNAS so the # total number of cells is equal to the number normal cells plus the number # of stem cells (two by default). total_num_cells = hparams.num_cells + 2 normal_cell = PNasNetNormalCell(hparams.num_conv_filters, hparams.drop_path_keep_prob, total_num_cells, hparams.total_training_steps, hparams.use_bounded_activation) with arg_scope( [slim.dropout, nasnet_utils.drop_path, slim.batch_norm], is_training=is_training): with arg_scope( [ slim.avg_pool2d, slim.max_pool2d, slim.conv2d, slim.batch_norm, slim.separable_conv2d, nasnet_utils.factorized_reduction, nasnet_utils.global_avg_pool, nasnet_utils.get_channel_index, nasnet_utils.get_channel_dim ], data_format=hparams.data_format): return _build_pnasnet_base( images, normal_cell=normal_cell, num_classes=num_classes, hparams=hparams, is_training=is_training, final_endpoint=final_endpoint) build_pnasnet_mobile.default_image_size = 224 class PNasNetNormalCell(nasnet_utils.NasNetABaseCell): """PNASNet Normal Cell.""" def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells, total_training_steps, use_bounded_activation=False): # Configuration for the PNASNet-5 model. operations = [ 'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2', 'max_pool_3x3', 'separable_5x5_2', 'separable_3x3_2', 'separable_3x3_2', 'max_pool_3x3', 'separable_3x3_2', 'none' ] used_hiddenstates = [1, 1, 0, 0, 0, 0, 0] hiddenstate_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0] super(PNasNetNormalCell, self).__init__( num_conv_filters, operations, used_hiddenstates, hiddenstate_indices, drop_path_keep_prob, total_num_cells, total_training_steps, use_bounded_activation)
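A minimal usage sketch for the builders above, assuming the standard TF-slim 1001-class ImageNet setup and the mobile variant's default 224x224 input resolution; it is not part of the file itself.

```python
import tensorflow as tf
from nets.nasnet import pnasnet

images = tf.placeholder(tf.float32, [None, 224, 224, 3])

with tf.contrib.framework.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
    logits, end_points = pnasnet.build_pnasnet_mobile(
        images, num_classes=1001, is_training=False)

# 'Predictions' is one of the endpoints registered in _build_pnasnet_base.
predictions = end_points['Predictions']
```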
PyTorch/LanguageModeling/BERT/data
data
SquadDownloader
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import bz2 import os import urllib.request import sys class SquadDownloader: def __init__(self, save_path): self.save_path = save_path + '/squad' if not os.path.exists(self.save_path): os.makedirs(self.save_path) if not os.path.exists(self.save_path + '/v1.1'): os.makedirs(self.save_path + '/v1.1') if not os.path.exists(self.save_path + '/v2.0'): os.makedirs(self.save_path + '/v2.0') self.download_urls = { 'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json' : 'v1.1/train-v1.1.json', 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json' : 'v1.1/dev-v1.1.json', 'https://worksheets.codalab.org/rest/bundles/0xbcd57bee090b421c982906709c8c27e1/contents/blob/' : 'v1.1/evaluate-v1.1.py', 'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json' : 'v2.0/train-v2.0.json', 'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json' : 'v2.0/dev-v2.0.json', 'https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/' : 'v2.0/evaluate-v2.0.py', } def download(self): for item in self.download_urls: url = item file = self.download_urls[item] print('Downloading:', url) if os.path.isfile(self.save_path + '/' + file): print('** Download file already exists, skipping download') else: response = urllib.request.urlopen(url) with open(self.save_path + '/' + file, "wb") as handle: handle.write(response.read())
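A short usage sketch, assuming the class above is importable and that `/workspace/bert/download` is a writable directory (the path is illustrative):

```python
downloader = SquadDownloader(save_path='/workspace/bert/download')
downloader.download()
# After this call the files are laid out as:
#   /workspace/bert/download/squad/v1.1/{train-v1.1.json, dev-v1.1.json, evaluate-v1.1.py}
#   /workspace/bert/download/squad/v2.0/{train-v2.0.json, dev-v2.0.json, evaluate-v2.0.py}
```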
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks
networks
span_labeling_test
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for span_labeling network.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf from tensorflow.python.keras import keras_parameterized # pylint: disable=g-direct-tensorflow-import from official.nlp.modeling.networks import span_labeling # This decorator runs the test in V1, V2-Eager, and V2-Functional mode. It # guarantees forward compatibility of this code for the V2 switchover. @keras_parameterized.run_all_keras_modes class SpanLabelingTest(keras_parameterized.TestCase): def test_network_creation(self): """Validate that the Keras object can be created.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling( input_width=input_width, output='predictions') # Create a 3-dimensional input (the first dimension is implicit). sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) start_outputs, end_outputs = test_network(sequence_data) # Validate that the outputs are of the expected shape. expected_output_shape = [None, sequence_length] self.assertEqual(expected_output_shape, start_outputs.shape.as_list()) self.assertEqual(expected_output_shape, end_outputs.shape.as_list()) def test_network_invocation(self): """Validate that the Keras object can be invoked.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling(input_width=input_width) # Create a 3-dimensional input (the first dimension is implicit). sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) outputs = test_network(sequence_data) model = tf.keras.Model(sequence_data, outputs) # Invoke the network as part of a Model. batch_size = 3 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, input_width)) start_outputs, end_outputs = model.predict(input_data) # Validate that the outputs are of the expected shape. expected_output_shape = (batch_size, sequence_length) self.assertEqual(expected_output_shape, start_outputs.shape) self.assertEqual(expected_output_shape, end_outputs.shape) def test_network_invocation_with_internal_logit_output(self): """Validate that the logit outputs are correct.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling( input_width=input_width, output='predictions') # Create a 3-dimensional input (the first dimension is implicit). 
sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) output = test_network(sequence_data) model = tf.keras.Model(sequence_data, output) logit_model = tf.keras.Model( test_network.inputs, [test_network.start_logits, test_network.end_logits]) batch_size = 3 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, input_width)) start_outputs, end_outputs = model.predict(input_data) start_logits, end_logits = logit_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, sequence_length) self.assertEqual(expected_output_shape, start_outputs.shape) self.assertEqual(expected_output_shape, end_outputs.shape) self.assertEqual(expected_output_shape, start_logits.shape) self.assertEqual(expected_output_shape, end_logits.shape) # Ensure that the logits, when softmaxed, create the outputs. input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) start_softmax = softmax_model.predict(start_logits) self.assertAllClose(start_outputs, start_softmax) end_softmax = softmax_model.predict(end_logits) self.assertAllClose(end_outputs, end_softmax) def test_network_invocation_with_external_logit_output(self): """Validate that the logit outputs are correct.""" sequence_length = 15 input_width = 512 test_network = span_labeling.SpanLabeling( input_width=input_width, output='predictions') logit_network = span_labeling.SpanLabeling( input_width=input_width, output='logits') logit_network.set_weights(test_network.get_weights()) # Create a 3-dimensional input (the first dimension is implicit). sequence_data = tf.keras.Input( shape=(sequence_length, input_width), dtype=tf.float32) output = test_network(sequence_data) logit_output = logit_network(sequence_data) model = tf.keras.Model(sequence_data, output) logit_model = tf.keras.Model(sequence_data, logit_output) batch_size = 3 input_data = 10 * np.random.random_sample( (batch_size, sequence_length, input_width)) start_outputs, end_outputs = model.predict(input_data) start_logits, end_logits = logit_model.predict(input_data) # Ensure that the tensor shapes are correct. expected_output_shape = (batch_size, sequence_length) self.assertEqual(expected_output_shape, start_outputs.shape) self.assertEqual(expected_output_shape, end_outputs.shape) self.assertEqual(expected_output_shape, start_logits.shape) self.assertEqual(expected_output_shape, end_logits.shape) # Ensure that the logits, when softmaxed, create the outputs. input_tensor = tf.keras.Input(expected_output_shape[1:]) output_tensor = tf.keras.layers.Activation(tf.nn.log_softmax)(input_tensor) softmax_model = tf.keras.Model(input_tensor, output_tensor) start_softmax = softmax_model.predict(start_logits) self.assertAllClose(start_outputs, start_softmax) end_softmax = softmax_model.predict(end_logits) self.assertAllClose(end_outputs, end_softmax) def test_serialize_deserialize(self): # Create a network object that sets all of its config options. network = span_labeling.SpanLabeling( input_width=128, activation='relu', initializer='zeros', output='predictions') # Create another network object from the first object's config. new_network = span_labeling.SpanLabeling.from_config(network.get_config()) # Validate that the config can be forced to JSON. _ = new_network.to_json() # If the serialization was successful, the new config should match the old. 
self.assertAllEqual(network.get_config(), new_network.get_config()) def test_unknown_output_type_fails(self): with self.assertRaisesRegex(ValueError, 'Unknown `output` value "bad".*'): _ = span_labeling.SpanLabeling(input_width=10, output='bad') if __name__ == '__main__': tf.test.main()
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/model
model
xgboost
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. _target_: models.tspp_xgboost.TSPPXGBoost config: max_depth: 10 learning_rate: 0.2 subsample: 1.0 colsample_bytree: 0.8 tree_method: gpu_hist n_rounds: 400 objective: reg:squarederror defaults: - _self_ - /trainer@_global_/trainer: xgbtrainer
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/utils/types
types
array_type
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TypeVar import cupy as cp import numpy as np NDArray = TypeVar('NDArray', np.ndarray, cp.ndarray)
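A small illustrative sketch of how the `NDArray` TypeVar is meant to be used: one annotation covering functions that accept either a NumPy or a CuPy array. The `normalize` helper and the import path are assumptions for demonstration only.

```python
import cupy as cp
import numpy as np

from syngen.utils.types.array_type import NDArray  # assumed import path


def normalize(x: NDArray) -> NDArray:
    # Works identically for numpy.ndarray and cupy.ndarray inputs.
    return (x - x.mean()) / (x.std() + 1e-8)


print(normalize(np.arange(5, dtype=np.float32)))
print(normalize(cp.arange(5, dtype=cp.float32)))
```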
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
static_shape
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helper functions to access TensorShape values. The rank 4 tensor_shape must be of the form [batch_size, height, width, depth]. """ def get_batch_size(tensor_shape): """Returns batch size from the tensor shape. Args: tensor_shape: A rank 4 TensorShape. Returns: An integer representing the batch size of the tensor. """ tensor_shape.assert_has_rank(rank=4) return tensor_shape[0].value def get_height(tensor_shape): """Returns height from the tensor shape. Args: tensor_shape: A rank 4 TensorShape. Returns: An integer representing the height of the tensor. """ tensor_shape.assert_has_rank(rank=4) return tensor_shape[1].value def get_width(tensor_shape): """Returns width from the tensor shape. Args: tensor_shape: A rank 4 TensorShape. Returns: An integer representing the width of the tensor. """ tensor_shape.assert_has_rank(rank=4) return tensor_shape[2].value def get_depth(tensor_shape): """Returns depth from the tensor shape. Args: tensor_shape: A rank 4 TensorShape. Returns: An integer representing the depth of the tensor. """ tensor_shape.assert_has_rank(rank=4) return tensor_shape[3].value
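A brief usage sketch for the accessors above; the import path follows the directory listed for this file, and the placeholder shape is illustrative.

```python
import tensorflow as tf

from object_detection.utils import static_shape

images = tf.placeholder(tf.float32, shape=[8, 300, 300, 3])
shape = images.get_shape()

batch_size = static_shape.get_batch_size(shape)  # 8
height = static_shape.get_height(shape)          # 300
width = static_shape.get_width(shape)            # 300
depth = static_shape.get_depth(shape)            # 3
```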
TensorFlow/Detection/SSD/models/research/object_detection/metrics
metrics
offline_eval_map_corloc_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for utilities in offline_eval_map_corloc binary.""" import tensorflow as tf from object_detection.metrics import offline_eval_map_corloc as offline_eval class OfflineEvalMapCorlocTest(tf.test.TestCase): def test_generateShardedFilenames(self): test_filename = '/path/to/file' result = offline_eval._generate_sharded_filenames(test_filename) self.assertEqual(result, [test_filename]) test_filename = '/path/to/file-00000-of-00050' result = offline_eval._generate_sharded_filenames(test_filename) self.assertEqual(result, [test_filename]) result = offline_eval._generate_sharded_filenames('/path/to/@3.record') self.assertEqual(result, [ '/path/to/-00000-of-00003.record', '/path/to/-00001-of-00003.record', '/path/to/-00002-of-00003.record' ]) result = offline_eval._generate_sharded_filenames('/path/to/abc@3') self.assertEqual(result, [ '/path/to/abc-00000-of-00003', '/path/to/abc-00001-of-00003', '/path/to/abc-00002-of-00003' ]) result = offline_eval._generate_sharded_filenames('/path/to/@1') self.assertEqual(result, ['/path/to/-00000-of-00001']) def test_generateFilenames(self): test_filenames = ['/path/to/file', '/path/to/@3.record'] result = offline_eval._generate_filenames(test_filenames) self.assertEqual(result, [ '/path/to/file', '/path/to/-00000-of-00003.record', '/path/to/-00001-of-00003.record', '/path/to/-00002-of-00003.record' ]) if __name__ == '__main__': tf.test.main()
TensorFlow/LanguageModeling
LanguageModeling
README
# Language Modeling Language modeling (LM) is a natural language processing (NLP) task that determines the probability of a given sequence of words occurring in a sentence. In an era where computers, smartphones and other electronic devices increasingly need to interact with humans, language modeling has become an indispensable technique for teaching devices how to communicate in natural languages in human-like ways. But how does language modeling work? And what can you build with it? What are the different approaches, what are its potential benefits and limitations, and how might you use it in your business? In this guide, you’ll find answers to all of those questions and more. Whether you’re an experienced machine learning engineer considering implementation, a developer wanting to learn more, or a product manager looking to explore what’s possible with natural language processing and language modeling, this guide is for you. Here’s a look at what we’ll cover: - Language modeling – the basics - How does language modeling work? - Use cases and applications - Getting started ## Language modeling – the basics ### What is language modeling? "*Language modeling is the task of assigning a probability to sentences in a language. […] Besides assigning a probability to each sequence of words, the language models also assign a probability for the likelihood of a given word (or a sequence of words) to follow a sequence of words.*" Source: Page 105, [Neural Network Methods in Natural Language Processing](http://amzn.to/2wt1nzv), 2017. ### Types of language models There are primarily two types of Language Models: - Statistical Language Models: These models use traditional statistical techniques like N-grams, Hidden Markov Models (HMM), and certain linguistic rules to learn the probability distribution of words. - Neural Language Models: They use different kinds of Neural Networks to model language, and have surpassed the statistical language models in their effectiveness. "*We provide ample empirical evidence to suggest that connectionist language models are superior to standard n-gram techniques, except their high computational (training) complexity.*" Source: [Recurrent neural network based language model](http://www.fit.vutbr.cz/research/groups/speech/publi/2010/mikolov_interspeech2010_IS100722.pdf), 2010. Given the superior performance of neural language models, we include in the container two popular state-of-the-art neural language models: BERT and Transformer-XL. ### Why is language modeling important? Language modeling is fundamental in modern NLP applications. It enables machines to understand qualitative information, and enables people to communicate with machines in the natural languages that humans use to communicate with each other. Language modeling is used directly in a variety of industries, including tech, finance, healthcare, transportation, legal, military, government, and more -- actually, you probably have just interacted with a language model today, whether it be through Google search, engaging with a voice assistant, or using text autocomplete features. ## How does language modeling work? The roots of modern language modeling can be traced back to 1948, when Claude Shannon published a paper titled "A Mathematical Theory of Communication", laying the foundation for information theory and language modeling. In the paper, Shannon detailed the use of a stochastic model called the Markov chain to create a statistical model for the sequences of letters in English text. 
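To make the idea of assigning probabilities to word sequences concrete, here is a toy bigram model with add-one smoothing; the corpus, smoothing choice, and helper names are illustrative and not tied to any model in this repository.

```python
from collections import Counter

corpus = "i am driving i am reading i am driving home".split()

unigrams = Counter(corpus)
bigrams = Counter(zip(corpus, corpus[1:]))

def bigram_prob(prev, word, vocab_size=len(unigrams)):
    # Add-one (Laplace) smoothing so unseen pairs get a small, non-zero probability.
    return (bigrams[(prev, word)] + 1) / (unigrams[prev] + vocab_size)

def sentence_prob(words):
    p = 1.0
    for prev, word in zip(words, words[1:]):
        p *= bigram_prob(prev, word)
    return p

print(sentence_prob("i am driving".split()))   # higher probability
print(sentence_prob("me am driving".split()))  # lower: "me am" never occurs in the corpus
```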
Markov models and n-gram models are still among the most popular statistical language models today. However, simple statistical language models have serious drawbacks in scalability and fluency because of their sparse representation of language. Neural language models overcome this problem by representing language units (e.g., words, characters) as non-linear, distributed combinations of weights in a continuous space, which lets them generalize to rare or unseen words.

Therefore, as mentioned above, we introduce two popular state-of-the-art neural language models, BERT and Transformer-XL, in TensorFlow and PyTorch. More details can be found in the [NVIDIA Deep Learning Examples GitHub Repository](https://github.com/NVIDIA/DeepLearningExamples).

## Use cases and applications

### Speech Recognition

Imagine speaking a phrase to the phone, expecting it to convert the speech to text. How does it know if you said "recognize speech" or "wreck a nice beach"? Language models help figure it out based on the context, enabling machines to process and make sense of speech audio.

### Spelling Correction

Spellcheckers built on language models can point to spelling errors and suggest alternatives.

### Machine translation

Imagine you are translating the Chinese sentence "我在开车" into English. Your translation system gives you several choices:

- I at open car
- me at open car
- I at drive
- me at drive
- I am driving
- me am driving

A language model tells you which translation sounds the most natural.

## Getting started

NVIDIA provides examples for Language Modeling in the [Deep Learning Examples GitHub Repository](https://github.com/NVIDIA/DeepLearningExamples). These examples provide easy-to-consume, highly optimized scripts for both training and inference. The quick start guide in the repository will help you set up the environment using NGC Docker images, download pre-trained models from NGC, and adapt the model training and inference to your application or use case. These models are tested and maintained by NVIDIA, leveraging mixed precision with Tensor Cores on our latest GPUs for faster training times while maintaining accuracy.
TensorFlow/Segmentation/UNet_Industrial/model/layers
layers
padding
#!/usr/bin/env python # -*- coding: utf-8 -*- # ============================================================================== # # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ============================================================================== import tensorflow as tf from model.layers.utils import _log_hparams __all__ = ['pad'] def pad(inputs, paddings, mode='CONSTANT', name='padding', constant_values=0): if mode.upper() not in ['CONSTANT', 'REFLECT', 'SYMMETRIC']: raise ValueError("Unknown padding mode: `%s` (accepted: ['CONSTANT', 'REFLECT', 'SYMMETRIC'])" % mode) net = tf.pad(inputs, paddings=paddings, mode=mode, name=name, constant_values=constant_values) _log_hparams( classname='Padding', layername=net.name, paddings=paddings, mode=mode, constant_values=constant_values, out_shape=str(net.get_shape()), out_dtype=net.dtype ) return net
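A short usage sketch for the `pad` helper above, reflect-padding the spatial dimensions of an NHWC tensor by one pixel; the import path mirrors this file's location and the input shape is illustrative.

```python
import tensorflow as tf

from model.layers.padding import pad  # assumed import path for the helper above

inputs = tf.placeholder(tf.float32, [None, 64, 64, 3])

# Reflect-pad one pixel on each spatial border of an NHWC tensor.
padded = pad(
    inputs,
    paddings=[[0, 0], [1, 1], [1, 1], [0, 0]],
    mode='REFLECT'
)
print(padded.get_shape())  # (?, 66, 66, 3)
```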
PyTorch/LanguageModeling/Transformer-XL/pytorch
pytorch
run_wt103_base
#!/bin/bash

# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

export OMP_NUM_THREADS=1

if [[ "$1" == 'train' ]]; then
    echo 'Run training...'
    python -m torch.distributed.launch --nproc_per_node="$2" train.py \
        --config_file wt103_base.yaml \
        "${@:3}"
elif [[ "$1" == 'eval' ]]; then
    echo 'Run evaluation...'
    python -m torch.distributed.launch --nproc_per_node="$2" eval.py \
        --config_file wt103_base.yaml \
        "${@:3}"
else
    echo "Unknown first argument: '$1' (expected 'train' or 'eval')"
    exit 1
fi
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/transforms
transforms
base_transform
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC class BaseTransform(ABC): """Base class for all transforms. The `BaseTransform` class contains methods that must be implemented by specific transforms objects. The `fit` method is optional. """ def fit(self, data): """Fits the transform on the data. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to transform. Returns: None """ pass def transform(self, data): """Transform the data. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to transform. Returns: numpy.array: Transformed data. """ raise NotImplementedError() def fit_transform(self, data): """Fit to the data and then return the transformed data. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to fit and transform Returns: Transformed data. """ self.fit(data) return self.transform(data) def inverse_transform(self, data): """Reverses the transformation done on the data back to original values. Args: data (pandas.Series or cudf.Series or numpy.array or cupy.array): Data to inverse-transform. Returns: Inverse transformed data. """ raise NotImplementedError()
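A hypothetical concrete transform, sketched only to show how the interface above is intended to be subclassed; `LogTransform` is not part of the SynGen codebase.

```python
import numpy as np

# Assumes BaseTransform from the module above is importable in this scope.
class LogTransform(BaseTransform):
    """Shift-and-log transform that maps a column to log-space and back."""

    def fit(self, data):
        # Remember the minimum so the shifted values are non-negative.
        self.min_value = float(np.min(data))

    def transform(self, data):
        return np.log1p(np.asarray(data) - self.min_value)

    def inverse_transform(self, data):
        return np.expm1(np.asarray(data)) + self.min_value


t = LogTransform()
transformed = t.fit_transform(np.array([1.0, 10.0, 100.0]))
recovered = t.inverse_transform(transformed)  # ~[1.0, 10.0, 100.0]
```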
TensorFlow2/Recommendation/SIM/preprocessing
preprocessing
sim_preprocessing
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Preprocessing script for SIM models.""" import logging import multiprocessing import os import click import cudf import cupy import dask.dataframe import dask_cudf import rmm from preprocessing.io import load_metadata, load_review_data, save_metadata from preprocessing.ops import ExplodeSequence, add_negative_sequence, list_slice, slice_and_pad_left DASK_TRAIN_DATASET_CHUNKSIZE = 15_000 TRAIN_DATA_DIR = "train" TEST_DATA_DIR = "test" TEST_DATA_FILE = "part.0.parquet" CATEGORIZED_METADATA_FILE = "metadata.json" OUTPUT_META = { "label": "int8", "uid": "int64", "item": "int32", "cat": "int32", "item_sequence": "list", "cat_sequence": "list", "neg_item_sequence": "list", "neg_cat_sequence": "list", } logging.basicConfig( level=logging.INFO, format="[%(asctime)s] %(levelname)s: %(message)s", ) def add_categorified_column(df, col_name, id_col_name): unique_values = df[col_name].unique().to_frame() unique_values[id_col_name] = cupy.arange(len(unique_values), dtype="int32") + 1 df = df.merge(unique_values, how="left", on=col_name) return df def categorify_items(all_items_unique: cudf.DataFrame, metadata: cudf.DataFrame) -> cudf.DataFrame: unique_item_with_category = all_items_unique.merge(metadata, how="left", on="item") unique_item_with_category = unique_item_with_category.fillna("no_category") df = add_categorified_column(unique_item_with_category, "item", "item_id") df = add_categorified_column(df, "cat", "cat_id") return df def filter_too_short_sequences(reviews: cudf.DataFrame, min_seq_length: int) -> cudf.DataFrame: user_counts = reviews["user"].value_counts() user_counts_filtered = user_counts[user_counts >= min_seq_length] valid_users = user_counts_filtered.index reviews = reviews[reviews["user"].isin(valid_users)] reviews.reset_index(drop=True, inplace=True) return reviews def add_items_and_categories_indices( reviews: cudf.DataFrame, item_and_cat_with_ids: cudf.DataFrame, ) -> cudf.DataFrame: return reviews.merge(item_and_cat_with_ids, how="left", on="item") def categorify_users(reviews: cudf.DataFrame) -> cudf.DataFrame: return add_categorified_column(reviews, "user", "uid") def create_sampling_df( all_items: cudf.DataFrame, item_and_cat_with_ids: cudf.DataFrame ) -> cudf.DataFrame: sampling_df = all_items.merge(item_and_cat_with_ids, how="left", on="item") sampling_df = sampling_df[["item_id", "cat_id"]] sampling_df = sampling_df.sort_values(by="item_id") sampling_df.reset_index(drop=True, inplace=True) return sampling_df def aggregate_per_user(df): df = df.sort_values(by=["unixReviewTime", "item"]) df = df.groupby("uid").agg({ "item_id": list, "cat_id": list, }) df.reset_index(inplace=True) df = df.rename(columns={ "item_id": "item_sequence", "cat_id": "cat_sequence", }) df["item"] = df["item_sequence"].list.get(-1) df["cat"] = df["cat_sequence"].list.get(-1) df["item_sequence"] = list_slice(df["item_sequence"], 0, -1) df["cat_sequence"] = list_slice(df["cat_sequence"], 0, -1) return df 
def explode_sequence(df: cudf.DataFrame, min_elements: int, max_elements: int) -> cudf.DataFrame: df = ExplodeSequence( col_names=["item_sequence", "cat_sequence"], keep_cols=["uid"], max_elements=max_elements + 1, ).transform(df) df["item"] = df["item_sequence"].list.get(-1) df["cat"] = df["cat_sequence"].list.get(-1) df["item_sequence"] = list_slice(df["item_sequence"], 0, -1) df["cat_sequence"] = list_slice(df["cat_sequence"], 0, -1) df = df[df.item_sequence.list.len() >= min_elements] return df def add_negative_label(pos_df: cudf.DataFrame, sampling_df: cudf.DataFrame) -> cudf.DataFrame: neg_df = pos_df.copy() pos_df["label"] = cupy.int8(1) neg_df["label"] = cupy.int8(0) neg = cupy.random.randint( low=0, high=len(sampling_df), size=len(neg_df), dtype=int, ) neg_item_ids = sampling_df["item_id"].iloc[neg].values neg_df["item"] = neg_item_ids neg_cat_ids = sampling_df["cat_id"].iloc[neg].values neg_df["cat"] = neg_cat_ids df = cudf.concat([pos_df, neg_df]) return df def add_negative_sampling(df: cudf.DataFrame, sampling_df: cudf.DataFrame) -> cudf.DataFrame: df = add_negative_label(df, sampling_df) neg = cupy.random.randint( low=0, high=len(sampling_df), size=int(df.item_sequence.list.len().sum()), dtype=int, ) item_samples = sampling_df["item_id"].iloc[neg] cat_samples = sampling_df["cat_id"].iloc[neg] df["neg_item_sequence"] = add_negative_sequence(df["item_sequence"], item_samples) df["neg_cat_sequence"] = add_negative_sequence(df["cat_sequence"], cat_samples) return df def pad_with_zeros(df: cudf.DataFrame, max_elements: int) -> cudf.DataFrame: df["item_sequence"] = slice_and_pad_left(df["item_sequence"], max_elements) df["cat_sequence"] = slice_and_pad_left(df["cat_sequence"], max_elements) df["neg_item_sequence"] = slice_and_pad_left(df["neg_item_sequence"], max_elements) df["neg_cat_sequence"] = slice_and_pad_left(df["neg_cat_sequence"], max_elements) return df def create_train_dataset( df: cudf.DataFrame, sampling_df: cudf.DataFrame, min_elements: int, max_elements: int, output_path: str, seed: int, dask_scheduler: str = "processes", ) -> None: def transform(df, sampling_df, partition_info): part_seed = seed + partition_info["number"] + 1 cupy.random.seed(part_seed) df = explode_sequence(df, min_elements, max_elements) df = add_negative_sampling(df, sampling_df) df = pad_with_zeros(df, max_elements) df = df.sort_values(by=["uid"]) df.reset_index(drop=True, inplace=True) df = df[list(OUTPUT_META)] return df ddf = dask_cudf.from_cudf(df, chunksize=DASK_TRAIN_DATASET_CHUNKSIZE) ddf = ddf.map_partitions(transform, meta=OUTPUT_META, sampling_df=sampling_df) ddf = ddf.clear_divisions() with dask.config.set(scheduler=dask_scheduler): ddf.to_parquet(output_path, write_index=False, overwrite=True) def create_test_dataset( df: cudf.DataFrame, sampling_df: cudf.DataFrame, max_elements: int, output_path: str, ) -> None: df = add_negative_sampling(df, sampling_df) df = pad_with_zeros(df, max_elements) df = df.sort_values(by=["uid"]) df.reset_index(drop=True, inplace=True) df = df[list(OUTPUT_META)] os.makedirs(output_path, exist_ok=True) output_file = os.path.join(output_path, TEST_DATA_FILE) df.to_parquet(output_file, index=False) @click.command() @click.option( "--amazon_dataset_path", required=True, help="Path to the dataset. Must contain both reviews and metadata json files.", type=str, ) @click.option( "--output_path", required=True, help="Path where preprocessed dataset is saved.", type=str, ) @click.option( "--metadata_file_name", default="meta_Books.json", help="Path to the dataset. 
Must contain both reviews and metadata json files.", type=str, ) @click.option( "--reviews_file_name", default="reviews_Books.json", help="Path where preprocessed dataset is saved.", type=str, ) @click.option( "--max_sequence_length", default=100, help="Take only `max_sequence_length` last elements of a sequence.", ) @click.option( "--shortest_sequence_for_user", default=20, help="Specifies what is a minimal length of a sequence. " "Every user with a sequence shorter than this value will be discarded." ) @click.option( "--shortest_sequence_for_training", default=1, help="Specifies what is a minimal length of a sequence in a training set.", ) @click.option( "--metadata_loader_n_proc", default=multiprocessing.cpu_count(), help="Specifies the number of processes used to parse metadata.", ) @click.option( "--review_loader_num_workers", default=20, help="Specifies the number of dask workers used to read reviews data. " "Note that, as each worker is a new process, too high value might cause GPU OOM errors." ) @click.option( "--seed", default=12345, help="Seed for reproducibility." "Note that the results can still differ between machines because of dask/cudf non-determinism.", type=int, ) def main( amazon_dataset_path: str, output_path: str, metadata_file_name: str, reviews_file_name: str, max_sequence_length: int, shortest_sequence_for_user: int, shortest_sequence_for_training: int, metadata_loader_n_proc: int, review_loader_num_workers: int, seed: int, ): cupy.random.seed(seed) rmm.reinitialize(managed_memory=True) metadata_path = os.path.join(amazon_dataset_path, metadata_file_name) reviews_path = os.path.join(amazon_dataset_path, reviews_file_name) logging.info("Loading metadata") metadata = load_metadata(metadata_path, metadata_loader_n_proc) assert len(metadata) == metadata["item"].nunique(), "metadata should contain unique items" logging.info("Loading review data") reviews = load_review_data(reviews_path, review_loader_num_workers) logging.info("Removing short user sequences") reviews = filter_too_short_sequences(reviews, shortest_sequence_for_user) logging.info("Categorifying users, items, categories") all_items_unique = reviews["item"].unique().to_frame() item_and_cat_with_ids = categorify_items(all_items_unique, metadata) reviews = add_items_and_categories_indices(reviews, item_and_cat_with_ids) reviews = categorify_users(reviews) logging.info("Aggregating data per user") df = aggregate_per_user(reviews) logging.info("Preparing dataframe for negative sampling") all_items = reviews["item"].to_frame() sampling_df = create_sampling_df(all_items, item_and_cat_with_ids) os.makedirs(output_path, exist_ok=True) logging.info("Creating train dataset") create_train_dataset( df, sampling_df, min_elements=shortest_sequence_for_training, max_elements=max_sequence_length, output_path=os.path.join(output_path, TRAIN_DATA_DIR), seed=seed, ) logging.info("Creating test dataset") create_test_dataset( df, sampling_df, max_elements=max_sequence_length, output_path=os.path.join(output_path, TEST_DATA_DIR), ) logging.info("Saving metadata") save_metadata( number_of_items=len(item_and_cat_with_ids), number_of_categories=item_and_cat_with_ids["cat_id"].nunique(), number_of_users=len(df), output_path=os.path.join(output_path, CATEGORIZED_METADATA_FILE), ) if __name__ == "__main__": main()
MxNet/Classification/RN50v1.5
RN50v1.5
README
# ResNet-50 v1.5 for MXNet This repository provides a script and recipe to train the ResNet-50 v1.5 model to achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA. ## Table Of Contents - [Model overview](#model-overview) * [Model architecture](#model-architecture) * [Default configuration](#default-configuration) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [Enabling TF32](#enabling-tf32) - [Setup](#setup) * [Requirements](#requirements) - [Quick Start Guide](#quick-start-guide) - [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Parameters](#parameters) * [Command-line options](#command-line-options) * [Getting the data](#getting-the-data) * [Dataset guidelines](#dataset-guidelines) * [Multi-dataset](#multi-dataset) * [Training process](#training-process) * [Inference process](#inference-process) - [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb) * [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb) * [Training stability test](#training-stability-test) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb) * [Training performance: NVIDIA DGX-1 (8x V100 16GB)](#training-performance-nvidia-dgx-1-8x-v100-16gb) * [Training performance: NVIDIA DGX-2 (16x V100 32GB)](#training-performance-nvidia-dgx-2-16x-v100-32gb) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA DGX A100 (1x A100 80GB)](#inference-performance-nvidia-dgx-a100-1x-a100-80gb) * [Inference performance: NVIDIA DGX-1 (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb) * [Inference performance: NVIDIA T4](#inference-performance-nvidia-t4) - [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview The ResNet-50 v1.5 model is a modified version of the [original ResNet-50 v1 model](https://arxiv.org/abs/1512.03385). The difference between v1 and v1.5 is in the bottleneck blocks which require downsampling. ResNet v1 has stride = 2 in the first 1x1 convolution, whereas v1.5 has stride = 2 in the 3x3 convolution. This difference makes ResNet-50 v1.5 slightly more accurate (~0.5% top1) than v1, but comes with a small performance drawback (~5% imgs/sec). This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results 3.5x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. ### Model architecture The model architecture was present in [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) paper. The main advantage of the model is the usage of residual layers as a building block that helps with gradient propagation during training. 
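To make the residual-block idea concrete, below is a small, hypothetical Gluon sketch of a v1.5 bottleneck block. It is not the implementation from `models.py` in this repository, but it shows the defining detail of v1.5: the downsampling stride sits in the 3x3 convolution rather than in the first 1x1 convolution.

```python
from mxnet.gluon import nn

class BottleneckV15(nn.HybridBlock):
    """Illustrative ResNet v1.5 bottleneck block (sketch, not the repository code)."""
    def __init__(self, channels, stride, downsample=False, **kwargs):
        super().__init__(**kwargs)
        self.body = nn.HybridSequential()
        self.body.add(nn.Conv2D(channels // 4, kernel_size=1, use_bias=False),
                      nn.BatchNorm(), nn.Activation('relu'),
                      # v1.5: the downsampling stride is applied here, in the 3x3 convolution
                      nn.Conv2D(channels // 4, kernel_size=3, strides=stride, padding=1, use_bias=False),
                      nn.BatchNorm(), nn.Activation('relu'),
                      nn.Conv2D(channels, kernel_size=1, use_bias=False),
                      nn.BatchNorm())
        self.downsample = None
        if downsample:
            self.downsample = nn.HybridSequential()
            self.downsample.add(nn.Conv2D(channels, kernel_size=1, strides=stride, use_bias=False),
                                nn.BatchNorm())

    def hybrid_forward(self, F, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        # Residual connection: gradients can flow through the shortcut path during training.
        return F.Activation(self.body(x) + shortcut, act_type='relu')
```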
![ResidualLayer](./img/residual_diagram.png) _Image source: [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)_ ### Default configuration **Optimizer** * SGD with momentum (0.875) * Learning rate = 0.256 for 256 batch size, for other batch sizes we linearly scale the learning rate * Learning rate schedule - we use cosine LR schedule * Linear warmup of the learning rate during the first 5 epochs according to [Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour](https://arxiv.org/abs/1706.02677). * Weight decay: 3.0517578125e-05 (1/32768) * We do not apply WD on batch norm trainable parameters (gamma/bias) * Label Smoothing: 0.1 * We train for: * 50 Epochs - configuration that reaches 75.9% top1 accuracy * 90 Epochs - 90 epochs is a standard for ResNet-50 * 250 Epochs - best possible accuracy. For 250 epoch training we also use [MixUp regularization](https://arxiv.org/pdf/1710.09412.pdf). **Data augmentation** For training: * Normalization * Random resized crop to 224x224 * Scale from 8% to 100% * Aspect ratio from 3/4 to 4/3 * Random horizontal flip For inference: * Normalization * Scale to 256x256 * Center crop to 224x224 ### Feature support matrix | **Feature** | **ResNet-50 MXNet** | |:---:|:--------:| |[DALI](https://docs.nvidia.com/deeplearning/sdk/dali-release-notes/index.html)|yes| |Horovod Multi-GPU|yes| #### Features The following features are supported by this model. **NVIDIA DALI** NVIDIA Data Loading Library (DALI) is a collection of highly optimized building blocks, and an execution engine, to accelerate the pre-processing of the input data for deep learning applications. DALI provides both the performance and the flexibility for accelerating different data pipelines as a single library. This single library can then be easily integrated into different deep learning training and inference applications. **Horovod Multi-GPU** Horovod is a distributed training framework for TensorFlow, Keras, PyTorch, and MXNet. The goal of Horovod is to make distributed deep learning fast and easy to use. For more information about how to get started with Horovod, see the [Horovod: Official repository](https://github.com/horovod/horovod). ### Mixed precision training Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in [CUDA 8](https://devblogs.nvidia.com/parallelforall/tag/fp16/) in the NVIDIA Deep Learning SDK. 
For information about: - How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/performance/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. #### Enabling mixed precision Using the Gluon API, ensure you perform the following steps to convert a model that supports computation with float16. 1. Cast Gluon Block‘s parameters and expected input type to float16 by calling the cast method of the Block representing the network. ```python net = net.cast('float16') ``` 2. Ensure the data input to the network is of float16 type. If your DataLoader or Iterator produces output in another datatype, then you have to cast your data. There are different ways you can do this. The easiest way is to use the `astype` method of NDArrays. ```python data = data.astype('float16', copy=False) ``` 3. If you are using images and DataLoader, you can also use a Cast transform. It is preferable to use `multi_precision` mode of optimizer when training in float16. This mode of optimizer maintains a master copy of the weights in float32 even when the training (forward and backward pass) is in float16. This helps increase precision of the weight updates and can lead to faster convergence in some scenarios. ```python optimizer = mx.optimizer.create('sgd', multi_precision=True, lr=0.01) ``` #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ## Setup The following section lists the requirements that you need to meet in order to start training the ResNet-50 v1.5 model. ### Requirements This repository contains Dockerfile which extends the MXNet NGC container and encapsulates some dependencies. 
Aside from these dependencies, ensure you have the following components: - [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) - [MXNet 22.10-py3 NGC container](https://ngc.nvidia.com/catalog/containers/nvidia%2Fmxnet) Supported GPUs: - [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) - [NVIDIA Turing architecture](https://www.nvidia.com/en-us/design-visualization/technologies/turing-architecture/) - [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning Documentation: - [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) - [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/frameworks/user-guide/index.html#accessing_registry) - [Running MXNet](https://docs.nvidia.com/deeplearning/frameworks/mxnet-release-notes/running.html#running) For those unable to use the MXNet NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). ## Quick Start Guide To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the ResNet-50 model on the ImageNet 1k dataset. For the specifics concerning training and inference, see the [Advanced](#advanced) section. 1. Clone the repository. ```bash git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/MxNet/Classification/RN50v1.5 ``` 2. Build the ResNet-50 MXNet NGC container. After Docker is set up, you can build the ResNet-50 image with: ```bash docker build . -t nvidia_rn50_mx ``` 3. Start an interactive session in the NGC container to run preprocessing/training/inference. ```bash nvidia-docker run --rm -it --ipc=host -v <path to dataset>:/data/imagenet/train-val-recordio-passthrough nvidia_rn50_mx ``` 4. Download the data. * Download the images from `http://image-net.org/download-images`. * Extract the training and validation data: ```bash mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train tar -xvf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done cd .. mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash ``` 5. Preprocess the ImageNet 1k dataset. ```bash ./scripts/prepare_imagenet.sh <path to raw imagenet> <path where processed dataset will be created> ``` 6. Start training. ```bash ./runner -n <number of gpus> -b <batch size per GPU (default 192)> ``` 7. Start validation/evaluation. ```bash ./runner -n <number of gpus> -b <batch size per GPU (default 192)> --load <path to trained model> --mode val ``` 8. Start inference/predictions. ```bash ./runner --load <path to trained model> --mode pred --data-pred <path to the image> ``` ## Advanced The following sections provide greater details of the dataset, running training and inference, and the training results. 
### Scripts and sample code In the root directory, the most important files are: * `runner`: A wrapper on the `train.py` script which is the main executable script for training/validation/predicting. * `benchmark.py`: A script for benchmarking. * `Dockerfile`: Container to build the container. * `fit.py`: A file containing most of the training and validation logic. * `data.py`: Data loading and preprocessing code. * `dali.py`: Data loading and preprocessing code using DALI. * `models.py`: The model architecture. * `report.py`: A file containing JSON report structure and description of fields. In the `scripts` directory, the most important files are: * `prepare_imagenet.sh`: A script that converts raw dataset format to RecordIO format. ### Parameters The complete list of available parameters contains: ``` Model: --arch {resnetv1,resnetv15,resnextv1,resnextv15,xception} model architecture (default: resnetv15) --num-layers NUM_LAYERS number of layers in the neural network, required by some networks such as resnet (default: 50) --num-groups NUM_GROUPS number of groups for grouped convolutions, required by some networks such as resnext (default: 32) --num-classes NUM_CLASSES the number of classes (default: 1000) --batchnorm-eps BATCHNORM_EPS the amount added to the batchnorm variance to prevent output explosion. (default: 1e-05) --batchnorm-mom BATCHNORM_MOM the leaky-integrator factor controling the batchnorm mean and variance. (default: 0.9) --fuse-bn-relu FUSE_BN_RELU have batchnorm kernel perform activation relu (default: 0) --fuse-bn-add-relu FUSE_BN_ADD_RELU have batchnorm kernel perform add followed by activation relu (default: 0) Training: --mode {train_val,train,val,pred} mode (default: train_val) --seed SEED random seed (default: None) --gpus GPUS list of gpus to run, e.g. 0 or 0,2,5 (default: [0]) --kv-store {device,horovod} key-value store type (default: device) --dtype {float32,float16} precision (default: float16) --amp If enabled, turn on AMP (Automatic Mixed Precision) (default: False) --batch-size BATCH_SIZE the batch size (default: 192) --num-epochs NUM_EPOCHS number of epochs (default: 90) --lr LR initial learning rate (default: 0.1) --lr-schedule {multistep,cosine} learning rate schedule (default: cosine) --lr-factor LR_FACTOR the ratio to reduce lr on each step (default: 0.256) --lr-steps LR_STEPS the epochs to reduce the lr, e.g. 30,60 (default: []) --warmup-epochs WARMUP_EPOCHS the epochs to ramp-up lr to scaled large-batch value (default: 5) --optimizer OPTIMIZER the optimizer type (default: sgd) --mom MOM momentum for sgd (default: 0.875) --wd WD weight decay for sgd (default: 3.0517578125e-05) --label-smoothing LABEL_SMOOTHING label smoothing factor (default: 0.1) --mixup MIXUP alpha parameter for mixup (if 0 then mixup is not applied) (default: 0) --disp-batches DISP_BATCHES show progress for every n batches (default: 20) --model-prefix MODEL_PREFIX model checkpoint prefix (default: model) --save-frequency SAVE_FREQUENCY frequency of saving model in epochs (--model-prefix must be specified). If -1 then save only best model. If 0 then do not save anything. 
(default: -1) --begin-epoch BEGIN_EPOCH start the model from an epoch (default: 0) --load LOAD checkpoint to load (default: None) --test-io test reading speed without training (default: False) --test-io-mode {train,val} data to test (default: train) --log LOG file where to save the log from the experiment (default: log.log) --dllogger-log DLLOGGER_LOG file where to save the dllogger log from the experiment (default: dllogger_log.log) --workspace WORKSPACE path to directory where results will be stored (default: ./) --no-metrics do not calculate evaluation metrics (for benchmarking) (default: False) --benchmark-iters BENCHMARK_ITERS run only benchmark-iters iterations from each epoch (default: None) Data: --data-train DATA_TRAIN the training data (default: None) --data-train-idx DATA_TRAIN_IDX the index of training data (default: ) --data-val DATA_VAL the validation data (default: None) --data-val-idx DATA_VAL_IDX the index of validation data (default: ) --data-pred DATA_PRED the image on which run inference (only for pred mode) (default: None) --data-backend {dali-gpu,dali-cpu,mxnet,synthetic} set data loading & augmentation backend (default: dali-gpu) --image-shape IMAGE_SHAPE the image shape feed into the network (default: [3, 224, 224]) --rgb-mean RGB_MEAN a tuple of size 3 for the mean rgb (default: [123.68, 116.779, 103.939]) --rgb-std RGB_STD a tuple of size 3 for the std rgb (default: [58.393, 57.12, 57.375]) --input-layout {NCHW,NHWC} the layout of the input data (default: NCHW) --conv-layout {NCHW,NHWC} the layout of the data assumed by the conv operation (default: NCHW) --batchnorm-layout {NCHW,NHWC} the layout of the data assumed by the batchnorm operation (default: NCHW) --pooling-layout {NCHW,NHWC} the layout of the data assumed by the pooling operation (default: NCHW) --num-examples NUM_EXAMPLES the number of training examples (doesn't work with mxnet data backend) (default: 1281167) --data-val-resize DATA_VAL_RESIZE base length of shorter edge for validation dataset (default: 256) DALI data backend: entire group applies only to dali data backend --dali-separ-val each process will perform independent validation on whole val-set (default: False) --dali-threads DALI_THREADS number of threadsper GPU for DALI (default: 3) --dali-validation-threads DALI_VALIDATION_THREADS number of threadsper GPU for DALI for validation (default: 10) --dali-prefetch-queue DALI_PREFETCH_QUEUE DALI prefetch queue depth (default: 2) --dali-nvjpeg-memory-padding DALI_NVJPEG_MEMORY_PADDING Memory padding value for nvJPEG (in MB) (default: 64) --dali-fuse-decoder DALI_FUSE_DECODER 0 or 1 whether to fuse decoder or not (default: 1) MXNet data backend: entire group applies only to mxnet data backend --data-mxnet-threads DATA_MXNET_THREADS number of threads for data decoding for mxnet data backend (default: 40) --random-crop RANDOM_CROP if or not randomly crop the image (default: 0) --random-mirror RANDOM_MIRROR if or not randomly flip horizontally (default: 1) --max-random-h MAX_RANDOM_H max change of hue, whose range is [0, 180] (default: 0) --max-random-s MAX_RANDOM_S max change of saturation, whose range is [0, 255] (default: 0) --max-random-l MAX_RANDOM_L max change of intensity, whose range is [0, 255] (default: 0) --min-random-aspect-ratio MIN_RANDOM_ASPECT_RATIO min value of aspect ratio, whose value is either None or a positive value. (default: 0.75) --max-random-aspect-ratio MAX_RANDOM_ASPECT_RATIO max value of aspect ratio. 
If min_random_aspect_ratio is None, the aspect ratio range is [1-max_random_aspect_ratio, 1+max_random_aspect_ratio], otherwise it is [min_random_aspect_ratio, max_random_aspect_ratio]. (default: 1.33) --max-random-rotate-angle MAX_RANDOM_ROTATE_ANGLE max angle to rotate, whose range is [0, 360] (default: 0) --max-random-shear-ratio MAX_RANDOM_SHEAR_RATIO max ratio to shear, whose range is [0, 1] (default: 0) --max-random-scale MAX_RANDOM_SCALE max ratio to scale (default: 1) --min-random-scale MIN_RANDOM_SCALE min ratio to scale, should >= img_size/input_shape. otherwise use --pad-size (default: 1) --max-random-area MAX_RANDOM_AREA max area to crop in random resized crop, whose range is [0, 1] (default: 1) --min-random-area MIN_RANDOM_AREA min area to crop in random resized crop, whose range is [0, 1] (default: 0.05) --min-crop-size MIN_CROP_SIZE Crop both width and height into a random size in [min_crop_size, max_crop_size] (default: -1) --max-crop-size MAX_CROP_SIZE Crop both width and height into a random size in [min_crop_size, max_crop_size] (default: -1) --brightness BRIGHTNESS brightness jittering, whose range is [0, 1] (default: 0) --contrast CONTRAST contrast jittering, whose range is [0, 1] (default: 0) --saturation SATURATION saturation jittering, whose range is [0, 1] (default: 0) --pca-noise PCA_NOISE pca noise, whose range is [0, 1] (default: 0) --random-resized-crop RANDOM_RESIZED_CROP whether to use random resized crop (default: 1) ``` ### Command-line options To see the full list of available options and their descriptions, use the `-h` or `--help` command line option: `./runner --help` and `python train.py --help` `./runner` acts as a wrapper on `train.py` and all additional flags will be passed to `train.py`. ### Getting the data The MXNet ResNet-50 v1.5 script operates on ImageNet 1k, a widely popular image classification dataset from ILSVRC challenge. You can download the images from `http://image-net.org/download-images`. The recommended data format is [RecordIO](https://mxnet.apache.org/versions/1.7.0/api/architecture/note_data_loading), which concatenates multiple examples into seekable binary files for better read efficiency. MXNet provides a tool called `im2rec.py` located in the `/opt/mxnet/tools/` directory. The tool converts individual images into `.rec` files. To prepare a RecordIO file containing ImageNet data, we first need to create `.lst` files which consist of the labels and image paths. We assume that the original images were downloaded to `/data/imagenet/raw/train-jpeg` and `/data/imagenet/raw/val-jpeg`. ```bash python /opt/mxnet/tools/im2rec.py --list --recursive train /data/imagenet/raw/train-jpeg python /opt/mxnet/tools/im2rec.py --list --recursive val /data/imagenet/raw/val-jpeg ``` Next, we generate the `.rec` (RecordIO files with data) and `.idx` (indexes required by DALI to speed up data loading) files. To obtain the best training accuracy we do not preprocess the images when creating the RecordIO file. ```bash python /opt/mxnet/tools/im2rec.py --pass-through --num-thread 40 train /data/imagenet/raw/train-jpeg python /opt/mxnet/tools/im2rec.py --pass-through --num-thread 40 val /data/imagenet/raw/val-jpeg ``` #### Dataset guidelines The process of loading, normalizing, and augmenting the data contained in the dataset can be found in the `data.py` and `dali.py` files. The data is read from RecordIO format, which concatenates multiple examples into seekable binary files for better read efficiency. 
Data augmentation techniques are described in the [Default configuration](#default-configuration) section. #### Multi-dataset In most cases, to train a model on a different dataset, no changes in the code are required, but the dataset has to be converted into RecordIO format. To convert a custom dataset, follow the steps from [Getting the data](#getting-the-data) section, and refer to the `scripts/prepare_dataset.py` script. ### Training process To start training, run: `./runner -n <number of gpus> -b <batch size per GPU> --data-root <path to imagenet> --dtype <float32 or float16>` By default, the training script runs the validation after each epoch: * The best checkpoint will be stored in the `model_best.params` file in the working directory. * The log from training will be saved in the `log.log` file in the working directory. * The JSON report with statistics will be saved in the `report.json` file in the working directory. If ImageNet is mounted in the `/data/imagenet/train-val-recordio-passthrough` directory, you don't have to specify the `--data-root` flag. ### Inference process To start validation, run: `./runner -n <number of gpus> -b <batch size per GPU> --data-root <path to imagenet> --dtype <float32 or float16> --mode val` By default: * The log from validation will be saved in the `log.log` file in the working directory. * The JSON report with statistics will be saved in the `report.json` file in the working directory. ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Benchmarking To benchmark training and inference, run: `python benchmark.py -n <numbers of gpus separated by comma> -b <batch sizes per GPU separated by comma> --data-root <path to imagenet> --dtype <float32 or float16> -o <path to benchmark report>` * To control the benchmark length per epoch, use the `-i` flag (defaults to 100 iterations). * To control the number of epochs, use the `-e` flag. * To control the number of warmup epochs (epochs which are not taken into account), use the `-w` flag. * To limit the length of the dataset, use the `--num-examples` flag. By default, the same parameters as in `./runner` will be used. Additional flags will be passed to `./runner`. #### Training performance benchmark To benchmark only training, use the `--mode train` flag. #### Inference performance benchmark To benchmark only inference, use the `--mode val` flag. ### Results The following sections provide details on how we achieved our performance and accuracy in training and inference. #### Training accuracy results ##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB) **90 epochs configuration** Our results were obtained by running 8 times the `./runner -n <number of gpus> -b 512 --dtype float32` script for TF32 and the `./runner -n <number of gpus> -b 512` script for mixed precision in the mxnet-22.10-py3 NGC container on NVIDIA DGX A100 with (8x A100 80GB) GPUs. 
| **GPUs** | **Accuracy - mixed precision** | **Accuracy - TF32** | **Time to train - mixed precision** | **Time to train - TF32** | **Time to train - speedup** | |:---:|:---:|:---:|:--:|:---:|:---:| |1|77.185|77.184|8.75|29.39|3.36| |8|77.185|77.184|1.14|3.82|3.35| ##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB) **90 epochs configuration** Our results were obtained by running the `./runner -n <number of gpus> -b 96 --dtype float32` training script for FP32 and the `./runner -n <number of gpus> -b 192` training script for mixed precision in the mxnet-22.10-py3 NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs. | **GPUs** | **Accuracy - mixed precision** | **Accuracy - FP32** | **Time to train - mixed precision** | **Time to train - FP32** | **Time to train - speedup** | |:---:|:---:|:---:|:---:|:---:|:---:| |1|77.342|77.160|24.2|84.5|3.49| |4|77.196|77.290|6.0|21.4|3.59| |8|77.150|77.313|3.0|10.7|3.54| ##### Training stability test Our results were obtained by running the following commands 8 times with different seeds. * For 50 epochs * `./runner -n 8 -b 96 --dtype float32 --num-epochs 50` for FP32 * `./runner -n 8 -b 192 --num-epochs 50` for mixed precision * For 90 epochs * `./runner -n 8 -b 96 --dtype float32` for FP32 * `./runner -n 8 -b 192` for mixed precision * For 250 epochs * `./runner -n 8 -b 96 --dtype float32 --num-epochs 250 --mixup 0.2` for FP32 * `./runner -n 8 -b 192 --num-epochs 250 --mixup 0.2` for mixed precision | **# of epochs** | **mixed precision avg top1** | **FP32 avg top1** | **mixed precision standard deviation** | **FP32 standard deviation** | **mixed precision minimum top1** | **FP32 minimum top1** | **mixed precision maximum top1** | **FP32 maximum top1** | |:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| |50|76.308|76.329|0.00073|0.00094|76.230|76.234|76.440|76.470| |90|77.150|77.313|0.00098|0.00085|76.972|77.228|77.266|77.474| |250|78.460|78.483|0.00078|0.00065|78.284|78.404|78.560|78.598| **Plots for 250 epoch configuration** Here are example graphs of FP32 and mixed precision training on 8 GPU 250 epochs configuration: ![TrainingLoss](./img/dgx1-16g_250e_training_loss.png) ![TrainingAccuracy](./img/dgx1-16g_250e_validation_top1.png) ![ValidationAccuracy](./img/dgx1-16g_250e_validation_top5.png) #### Training performance results ##### Training performance: NVIDIA DGX A100 (8x A100 80GB) The following results were obtained by running the `python benchmark.py -n 1,4,8 -b 512 --dtype float32 -o benchmark_report_tf32.json -i 500 -e 3 -w 1 --num-examples 32000 --mode train` script for TF32 and the `python benchmark.py -n 1,4,8 -b 512 --dtype float16 -o benchmark_report_fp16.json -i 500 -e 3 -w 1 --num-examples 32000 --mode train` script for mixed precision in the mxnet-22.10-py3 NGC container on NVIDIA DGX A100 with (8x A100 80GB) GPUs. Training performance reported as Total IPS (data + compute time taken into account). Weak scaling is calculated as a ratio of speed for given number of GPUs to speed for 1 GPU. 
| **GPUs** | **Throughput - mixed precision** | **Throughput - TF32** | **Throughput speedup (TF32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - TF32** | |:---:|:---:|:---:|:---:|:---:|:---:| |1|3410.52 |1055.78 |2.18 |1.00 |1.00 | |4|13442.66 |4182.30 |3.24 |3.97 |3.96 | |8|26673.72|8247.44 |3.23 |7.82 |7.81 | ##### Training performance: NVIDIA DGX-1 (8x V100 16GB) The following results were obtained by running the `python benchmark.py -n 1,2,4,8 -b 192 --dtype float16 -o benchmark_report_fp16.json -i 500 -e 3 -w 1 --num-examples 32000 --mode train` script for mixed precision and the `python benchmark.py -n 1,2,4,8 -b 96 --dtype float32 -o benchmark_report_fp32.json -i 500 -e 3 -w 1 --num-examples 32000 --mode train` script for FP32 in the mxnet-20.12-py3 NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs. Training performance reported as Total IPS (data + compute time taken into account). Weak scaling is calculated as a ratio of speed for given number of GPUs to speed for 1 GPU. | **GPUs** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - FP32** | |:---:|:---:|:---:|:---:|:---:|:---:| |1|1376 |384 |3.58 |1.00 |1.00 | |2|2768 |763 |3.62 |2.01 |1.98 | |4|5357 |1513 |3.54 |3.89 |3.94 | |8|10723 |3005 |3.56 |7.79 |7.82 | ##### Training performance: NVIDIA DGX-2 (16x V100 32GB) The following results were obtained by running the `python benchmark.py -n 1,2,4,8,16 -b 256 --dtype float16 -o benchmark_report_fp16.json -i 500 -e 3 -w 1 --num-examples 32000 --mode train` script for mixed precision and the `python benchmark.py -n 1,2,4,8,16 -b 128 --dtype float32 -o benchmark_report_fp32.json -i 500 -e 3 -w 1 --num-examples 32000 --mode train` script for FP32 in the mxnet-20.12-py3 NGC container on NVIDIA DGX-2 with (16x V100 32GB) GPUs. Training performance reported as Total IPS (data + compute time taken into account). Weak scaling is calculated as a ratio of speed for given number of GPUs to speed for 1 GPU. | **GPUs** | **Throughput - mixed precision** | **Throughput - FP32** | **Throughput speedup (FP32 - mixed precision)** | **Weak scaling - mixed precision** | **Weak scaling - FP32** | |:---:|:---:|:---:|:---:|:---:|:---:| |1 |1492 |417 |3.57 |1.00 |1.00 | |2 |2935 |821 |3.57 |1.96 |1.96 | |4 |5726 |1623 |3.52 |3.83 |3.92 | |8 |11368|3223 |3.52 |7.61 |7.72 | |16|21484|6338 |3.38 |14.39|15.19| #### Inference performance results ##### Inference performance: NVIDIA DGX A100 (1x A100 80GB) The following results were obtained by running the `python benchmark.py -n 1 -b 1,2,4,8,16,32,64,128,192,256 --dtype float16 -o inferbenchmark_report_fp16.json -i 500 -e 3 -w 1 --mode val` script for mixed precision and the `python benchmark.py -n 1 -b 1,2,4,8,16,32,64,128,192,256 --dtype float32 -o inferbenchmark_report_tf32.json -i 500 -e 3 -w 1 --mode val` script for TF32 in the mxnet-22.10-py3 NGC container on NVIDIA DGX A100 with (8x A100 80GB) GPUs. Inference performance reported as Total IPS (data + compute time taken into account). Reported mixed precision speedups are relative to TF32 numbers for corresponding configuration. 
| **Batch size** | **Throughput (img/sec) - mixed precision** | **Throughput - speedup** | **Avg latency (ms) - mixed precision** | **Avg latency - speedup** | **50% latency (ms) - mixed precision** | **50% latency - speedup** | **90% latency (ms) - mixed precision** | **90% latency - speedup** | **95% latency (ms) - mixed precision** | **95% latency - speedup** | **99% latency (ms) - mixed precision** | **99% latency - speedup** | |:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | 1 | 1431.99 | 1.9 | 0.7 | 1.9 | 0.68 | 1.95 | 0.71 | 1.9 | 0.84 | 1.65 | 0.88 | 1.7 | | 2 | 2530.66 | 2.19 | 0.79 | 2.19 | 0.74 | 2.31 | 0.86 | 2.05 | 0.93 | 2.0 | 2.0 | 0.97 | | 4 | 3680.74 | 2.11 | 1.09 | 2.11 | 0.92 | 2.49 | 1.21 | 1.98 | 1.64 | 1.51 | 6.03 | 0.45 | | 8 | 2593.88 | 1.11 | 3.08 | 1.11 | 2.89 | 1.17 | 4.09 | 0.89 | 4.72 | 0.8 | 9.85 | 0.55 | | 16 | 4340.08 | 1.52 | 3.69 | 1.52 | 3.31 | 1.68 | 4.73 | 1.24 | 6.3 | 0.95 | 12.31 | 0.54 | | 32 | 6808.22 | 2.1 | 4.7 | 2.1 | 4.0 | 2.46 | 6.44 | 1.58 | 9.01 | 1.15 | 15.88 | 0.68 | | 64 | 7659.96 | 2.21 | 8.36 | 2.21 | 7.44 | 2.48 | 10.76 | 1.75 | 13.91 | 1.37 | 21.96 | 0.9 | | 128 | 8017.67 | 2.23 | 15.96 | 2.23 | 15.0 | 2.37 | 18.95 | 1.9 | 21.65 | 1.67 | 30.36 | 1.23 | | 192 | 8240.8 | 2.26 | 23.3 | 2.26 | 22.49 | 2.33 | 25.65 | 2.07 | 27.54 | 1.94 | 37.19 | 1.5 | | 256 | 7909.62 | 2.15 | 32.37 | 2.15 | 31.66 | 2.2 | 34.27 | 2.05 | 37.02 | 1.9 | 42.83 | 1.66 | | 512 | 7213.43 | 2.07 | 70.98 | 2.07 | 70.48 | 2.08 | 73.21 | 2.04 | 74.38 | 2.03 | 79.15 | 1.99 | ##### Inference performance: NVIDIA DGX-1 (1x V100 16GB) The following results were obtained by running the `python benchmark.py -n 1 -b 1,2,4,8,16,32,64,128,192,256 --dtype float16 -o inferbenchmark_report_fp16.json -i 500 -e 3 -w 1 --mode val` script for mixed precision and the `python benchmark.py -n 1 -b 1,2,4,8,16,32,64,128,192,256 --dtype float32 -o inferbenchmark_report_fp32.json -i 500 -e 3 -w 1 --mode val` script for FP32 in the mxnet-20.12-py3 NGC container on NVIDIA DGX-1 with (8x V100 16GB) GPUs. Inference performance reported as Total IPS (data + compute time taken into account). Reported mixed precision speedups are relative to FP32 numbers for corresponding configuration. 
| **Batch size** | **Throughput (img/sec) - mixed precision** | **Throughput - speedup** | **Avg latency (ms) - mixed precision** | **Avg latency - speedup** | **50% latency (ms) - mixed precision** | **50% latency - speedup** | **90% latency (ms) - mixed precision** | **90% latency - speedup** | **95% latency (ms) - mixed precision** | **95% latency - speedup** | **99% latency (ms) - mixed precision** | **99% latency - speedup** | |:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | 1 | 286 | 1.27 | 3.48 | 1.27 | 3.45 | 1.27 | 3.61 | 1.26| 3.68 | 1.26| 3.86 | 1.24| | 2 | 519 | 1.34 | 3.84 | 1.34 | 3.77 | 1.35 | 4.05 | 1.31| 4.16 | 1.29| 4.59 | 1.27| | 4 | 910 | 1.60 | 4.39 | 1.60 | 4.35 | 1.61 | 4.59 | 1.56| 4.66 | 1.56| 5.19 | 1.47| | 8 | 1642| 2.20 | 4.87 | 2.20 | 4.68 | 2.29 | 5.35 | 2.05| 6.01 | 1.84| 11.06| 1.04| | 16 | 2359| 2.55 | 6.78 | 2.55 | 6.49 | 2.66 | 7.07 | 2.48| 8.33 | 2.12| 13.89| 1.30| | 32 | 2902| 2.86 | 11.02| 2.86 | 10.43| 3.02 | 12.25| 2.60| 13.88| 2.31| 21.41| 1.55| | 64 | 3234| 2.74 | 19.78| 2.74 | 18.89| 2.86 | 22.50| 2.44| 25.38| 2.17| 30.78| 1.81| | 128 | 3362| 2.69 | 38.06| 2.69 | 37.20| 2.75 | 42.32| 2.44| 45.12| 2.30| 50.59| 2.07| | 192 | 3178| 2.52 | 60.40| 2.52 | 59.62| 2.55 | 65.56| 2.35| 68.16| 2.25| 73.72| 2.10| | 256 | 3057| 2.38 | 83.73| 2.38 | 82.77| 2.40 | 92.26| 2.24| 92.26| 2.17|100.84| 2.23| ##### Inference performance: NVIDIA T4 The following results were obtained by running the `python benchmark.py -n 1 -b 1,2,4,8,16,32,64,128,192,256 --dtype float16 -o inferbenchmark_report_fp16.json -i 500 -e 3 -w 1 --mode val` script for mixed precision and the `python benchmark.py -n 1 -b 1,2,4,8,16,32,64,128,192,256 --dtype float32 -o inferbenchmark_report_fp32.json -i 500 -e 3 -w 1 --mode val` script for FP32 in the mxnet-20.12-py3 NGC container on an NVIDIA T4 GPU. Inference performance reported as Total IPS (data + compute time taken into account). Reported mixed precision speedups are relative to FP32 numbers for corresponding configuration. | **Batch size** | **Throughput (img/sec) - mixed precision** | **Throughput - speedup** | **Avg latency (ms) - mixed precision** | **Avg latency - speedup** | **50% latency (ms) - mixed precision** | **50% latency - speedup** | **90% latency (ms) - mixed precision** | **90% latency - speedup** | **95% latency (ms) - mixed precision** | **95% latency - speedup** | **99% latency (ms) - mixed precision** | **99% latency - speedup** | |:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | 1 | 131 | 1.11 | 7.61 | 1.17 | 7.10 | 0.97 | 10.28 | 0.92 | 11.35 | 0.95 | 15.05 | 0.96 | | 2 | 277 | 1.48 | 7.20 | 1.53 | 7.30 | 1.19 | 7.74 | 1.48 | 8.82 | 1.49 | 12.09 | 1.58 | | 4 | 374 | 1.47 | 10.67| 1.50 | 10.20| 1.40 | 13.51 | 1.09 | 14.82 | 1.03 | 22.36 | 0.74 | | 8 | 672 | 2.21 | 11.90| 2.23 | 11.21| 2.21 | 14.54 | 1.74 | 17.24 | 1.48 | 28.65 | 0.92 | | 16 | 1267| 3.57 | 12.62| 3.58 | 12.02| 3.59 | 14.02 | 3.13 | 16.02 | 2.76 | 22.28 | 2.01 | | 32 | 1473| 3.85 | 21.71| 3.86 | 21.67| 3.76 | 22.63 | 3.64 | 22.98 | 3.60 | 23.85 | 3.52 | | 64 | 1561| 3.70 | 40.98| 3.70 | 40.87| 3.64 | 41.98 | 3.57 | 42.56 | 3.53 | 43.85 | 3.46 | | 128 | 1555| 3.60 | 82.26| 3.60 | 81.86| 3.57 | 83.87 | 3.51 | 84.63 | 3.49 | 96.56 | 3.09 | | 192 | 1545| 3.64 |124.26| 3.64 |123.67| 3.61 |125.76 | 3.58 |126.73 | 3.56 |143.27 | 3.19 | | 256 | 1559| 3.71 |164.15| 3.71 |163.97| 3.71 |166.28 | 3.70 |167.01 | 3.70 |168.54 | 3.69 | ## Release notes ### Changelog 1. 
Dec, 2018 * Initial release (based on https://github.com/apache/incubator-mxnet/tree/v1.8.x/example/image-classification) 2. June, 2019 * Code refactor * Label smoothing * Cosine LR schedule * MixUp regularization * Better configurations 3. February, 2021 * DGX-A100 performance results * Container version upgraded to 20.12 4. December, 2022 * Container version upgraded to 22.10 * Updated the A100 performance results. V100 and T4 performance results reflect the performance using the 20.12 container ### Known Issues There are no known issues with this model.
TensorFlow2/LanguageModeling/ELECTRA/data/glue
glue
download_mrpc
#!/usr/bin/env bash

# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

echo "Downloading MRPC data"
wget https://gist.githubusercontent.com/W4ngatang/60c2bdb54d156a41194446737ce03e2e/raw/17b8dd0d724281ed7c3b2aeeda662b92809aadd5/download_glue_data.py
python download_glue_data.py --data_dir . --tasks MRPC
PyTorch/Recommendation/DLRM/triton
triton
client
#!/usr/bin/env python # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import argparse import json import sys import numpy as np import torch import tritonclient.http as http_client from sklearn.metrics import roc_auc_score from tqdm import tqdm from dlrm.data.datasets import SyntheticDataset, SplitCriteoDataset from dlrm.utils.distributed import get_device_mapping def get_data_loader(batch_size, *, data_path, model_config): with open(model_config.dataset_config) as f: categorical_sizes = list(json.load(f).values()) categorical_sizes = [s + 1 for s in categorical_sizes] device_mapping = get_device_mapping(categorical_sizes, num_gpus=1) if data_path: data = SplitCriteoDataset( data_path=data_path, batch_size=batch_size, numerical_features=True, categorical_features=device_mapping['embedding'][0], categorical_feature_sizes=categorical_sizes, prefetch_depth=1, drop_last_batch=model_config.drop_last_batch ) else: data = SyntheticDataset( num_entries=batch_size * 1024, batch_size=batch_size, numerical_features=model_config.num_numerical_features, categorical_feature_sizes=categorical_sizes, device="cpu" ) if model_config.test_batches > 0: data = torch.utils.data.Subset(data, list(range(model_config.test_batches))) return torch.utils.data.DataLoader(data, batch_size=None, num_workers=0, pin_memory=False) def run_infer(model_name, model_version, numerical_features, categorical_features, headers=None): inputs = [] outputs = [] num_type = "FP16" if numerical_features.dtype == np.float16 else "FP32" inputs.append(http_client.InferInput('input__0', numerical_features.shape, num_type)) inputs.append(http_client.InferInput('input__1', categorical_features.shape, "INT64")) # Initialize the data inputs[0].set_data_from_numpy(numerical_features, binary_data=True) inputs[1].set_data_from_numpy(categorical_features, binary_data=False) outputs.append(http_client.InferRequestedOutput('output__0', binary_data=True)) results = triton_client.infer(model_name, inputs, model_version=str(model_version) if model_version != -1 else '', outputs=outputs, headers=headers) return 
results if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--triton-server-url', type=str, required=True, help='URL adress of triton server (with port)') parser.add_argument('--triton-model-name', type=str, required=True, help='Triton deployed model name') parser.add_argument('--triton-model-version', type=int, default=-1, help='Triton model version') parser.add_argument('-v', '--verbose', action="store_true", required=False, default=False, help='Enable verbose output') parser.add_argument('-H', dest='http_headers', metavar="HTTP_HEADER", required=False, action='append', help='HTTP headers to add to inference server requests. ' + 'Format is -H"Header:Value".') parser.add_argument("--dataset_config", type=str, required=True) parser.add_argument("--inference_data", type=str, help="Path to file with inference data.") parser.add_argument("--batch_size", type=int, default=1, help="Inference request batch size") parser.add_argument("--drop_last_batch", type=bool, default=True, help="Drops the last batch size if it's not full") parser.add_argument("--fp16", action="store_true", default=False, help="Use 16bit for numerical input") parser.add_argument("--test_batches", type=int, default=0, help="Specifies number of batches used in the inference") FLAGS = parser.parse_args() try: triton_client = http_client.InferenceServerClient(url=FLAGS.triton_server_url, verbose=FLAGS.verbose) except Exception as e: print("channel creation failed: " + str(e)) sys.exit(1) if FLAGS.http_headers is not None: headers_dict = {l.split(':')[0]: l.split(':')[1] for l in FLAGS.http_headers} else: headers_dict = None triton_client.load_model(FLAGS.triton_model_name) if not triton_client.is_model_ready(FLAGS.triton_model_name): sys.exit(1) dataloader = get_data_loader(FLAGS.batch_size, data_path=FLAGS.inference_data, model_config=FLAGS) results = [] tgt_list = [] for numerical_features, categorical_features, target in tqdm(dataloader): numerical_features = numerical_features.cpu().numpy() numerical_features = numerical_features.astype(np.float16 if FLAGS.fp16 else np.float32) categorical_features = categorical_features.long().cpu().numpy() output = run_infer(FLAGS.triton_model_name, FLAGS.triton_model_version, numerical_features, categorical_features, headers_dict) results.append(output.as_numpy('output__0')) tgt_list.append(target.cpu().numpy()) results = np.concatenate(results).squeeze() tgt_list = np.concatenate(tgt_list) score = roc_auc_score(tgt_list, results) print(f"Model score: {score}") statistics = triton_client.get_inference_statistics(model_name=FLAGS.triton_model_name, headers=headers_dict) print(statistics) if len(statistics['model_stats']) != 1: print("FAILED: Inference Statistics") sys.exit(1)
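As a quick illustration of the tensors `run_infer` expects, the hypothetical snippet below builds one synthetic batch with numpy. The batch size and the 13 numerical / 26 categorical features are assumptions based on the usual Criteo layout, not values read from this client; the shapes and dtypes simply mirror the `input__0` (FP16/FP32) and `input__1` (INT64) inputs constructed above.

```python
import numpy as np

batch_size = 64
num_numerical = 13     # assumption: Criteo-style dense feature count
num_categorical = 26   # assumption: Criteo-style categorical feature count

# input__0: dense features, FP16 when --fp16 is passed, FP32 otherwise
numerical_features = np.random.rand(batch_size, num_numerical).astype(np.float16)

# input__1: categorical indices, INT64 as required by run_infer
categorical_features = np.random.randint(
    0, 1000, size=(batch_size, num_categorical), dtype=np.int64)

# response = run_infer("dlrm", -1, numerical_features, categorical_features)
# scores = response.as_numpy("output__0")
```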
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/evaluation
evaluation
evaluation_AMP_V100-32G
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

python main.py \
    --cfg config/efficientnet_v2/s_cfg.py \
    --mode eval \
    --use_amp \
    --use_xla \
    --eval_batch_size 128 \
    --eval_img_size 384 \
    --model_dir ./output/expXX \
    --n_repeat_eval 4 \
    --moving_average_decay 0.9999  # enables evaluation using EMA weights too
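The `--moving_average_decay 0.9999` flag above implies that an exponential moving average (EMA) of the model weights is tracked and can be evaluated. As a reminder of what that means, here is a small, generic sketch of the EMA update rule (illustrative only, not code from this repository):

```python
def ema_update(ema_weights, weights, decay=0.9999):
    """Standard EMA rule: ema <- decay * ema + (1 - decay) * w."""
    return [decay * e + (1.0 - decay) * w for e, w in zip(ema_weights, weights)]

# Toy example with a single scalar "weight" and an exaggerated decay of 0.5
ema = [0.0]
for step_weight in ([1.0], [1.0], [1.0]):
    ema = ema_update(ema, step_weight, decay=0.5)
print(ema)  # [0.875] - the averaged weights lag behind and smooth the trajectory
```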
TensorFlow2/Recommendation/WideAndDeep/triton/runner
runner
pipeline
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from typing import Dict, Tuple # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .stages import ( ConversionStage, DeployStage, ExportStage, ResultsType, TritonPerformanceOfflineStage, TritonPerformanceOnlineStage, TritonPreparePerformanceProfilingDataStage, ) class Pipeline: """ Definition of stages that has to be executed before and during experiments """ # Stages to execute as part of single experiment _experiment_stages = [ ExportStage.label, ConversionStage.label, DeployStage.label, TritonPreparePerformanceProfilingDataStage.label, TritonPerformanceOfflineStage.label, TritonPerformanceOnlineStage.label, ] def __init__(self): """ Initialize pipeline """ self._stages: Dict = dict() def model_export(self, commands: Tuple[str, ...]) -> None: """ Model export stage Args: commands: Commands to be executed as part of stage Returns: None """ stage = ExportStage(commands=commands) self._stages[stage.label] = stage def model_conversion(self, commands: Tuple[str, ...]) -> None: """ Model conversion stage Args: commands: Commands to be executed as part of stage Returns: None """ stage = ConversionStage(commands=commands) self._stages[stage.label] = stage def model_deploy(self, commands: Tuple[str, ...]) -> None: """ Model deployment stage Args: commands: Commands to be executed as part of stage Returns: None """ stage = DeployStage(commands=commands) self._stages[stage.label] = stage def triton_prepare_performance_profiling_data(self, commands: Tuple[str, ...]) -> None: """ Model profiling data creation stage Args: commands: Commands to be executed as part of stage Returns: None """ stage = TritonPreparePerformanceProfilingDataStage(commands=commands) self._stages[stage.label] = stage def triton_performance_offline_tests(self, commands: Tuple[str, ...], result_path: str) -> None: """ Model performance offline test stage Args: commands: Commands to be executed as part of stage result_path: Path where results file is stored Returns: None """ stage = TritonPerformanceOfflineStage( commands=commands, result_path=result_path, result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE, ) self._stages[stage.label] = stage def triton_performance_online_tests(self, commands: Tuple[str, ...], result_path: str) -> None: """ Model performance online test stage Args: commands: Commands to be executed as part of stage result_path: Path where results file is stored Returns: None """ stage = TritonPerformanceOnlineStage( commands=commands, result_path=result_path, result_type=ResultsType.TRITON_PERFORMANCE_ONLINE, ) self._stages[stage.label] = stage def stages(self): """ Generate stages which should be run per experiment Returns: Generator with stages object """ for stage_name in self._experiment_stages: stage = self._stages.get(stage_name) if not stage: continue yield stage
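A minimal, hypothetical sketch of how this `Pipeline` class is meant to be driven; the commands and result paths below are placeholders (the actual runner in this repository builds them from its configuration), and only stages that were registered are yielded by `stages()`, in the fixed `_experiment_stages` order.

```python
pipeline = Pipeline()

# Register commands for the stages to run (placeholder commands, not real ones).
pipeline.model_export(commands=("python triton/export_model.py ...",))
pipeline.model_conversion(commands=("model-navigator convert ...",))
pipeline.model_deploy(commands=("model-navigator triton-config-model ...",))
pipeline.triton_prepare_performance_profiling_data(
    commands=("python triton/prepare_input_data.py ...",))
pipeline.triton_performance_offline_tests(
    commands=("perf_analyzer ...",),
    result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
    commands=("perf_analyzer ...",),
    result_path="${SHARED_DIR}/triton_performance_online.csv",
)

# Stages come back in _experiment_stages order; unregistered stages are skipped.
for stage in pipeline.stages():
    print(stage.label)
```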
PyTorch/SpeechSynthesis/HiFiGAN/platform
platform
DGXA100_HiFi-GAN_AMP_8GPU
#!/bin/bash

set -a

: ${NUM_GPUS:=8}
: ${BATCH_SIZE:=16}
: ${GRAD_ACCUMULATION:=1}
: ${AMP:=true}

bash scripts/train_lj22khz.sh "$@" --no_amp_grouped_conv
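With these defaults the effective global batch size works out to NUM_GPUS × BATCH_SIZE × GRAD_ACCUMULATION = 8 × 16 × 1 = 128 samples per optimizer step, assuming the launched training script combines the per-GPU batch size, the number of GPUs, and the gradient-accumulation factor in the usual way.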
TensorFlow2/Classification/ConvNets/utils
utils
setup
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import numpy as np import tensorflow as tf import horovod.tensorflow as hvd def set_flags(params): # os.environ['CUDA_CACHE_DISABLE'] = '1' os.environ['HOROVOD_GPU_ALLREDUCE'] = 'NCCL' os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # os.environ['TF_GPU_THREAD_MODE'] = 'gpu_private' # os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '0' os.environ['TF_ADJUST_HUE_FUSED'] = '1' os.environ['TF_ADJUST_SATURATION_FUSED'] = '1' os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' # os.environ['TF_SYNC_ON_FINISH'] = '0' os.environ['TF_AUTOTUNE_THRESHOLD'] = '2' os.environ['HOROVOD_CACHE_CAPACITY'] = "0" os.environ['HOROVOD_CYCLE_TIME'] = "1.0" if params.intraop_threads: os.environ['TF_NUM_INTRAOP_THREADS'] = str(params.intraop_threads) if params.interop_threads: os.environ['TF_NUM_INTEROP_THREADS'] = str(params.interop_threads) if params.use_xla: # it turns out tf_xla_enable_lazy_compilation is used before running main.py, so setting this flag # in the current function would have no effect. Thus, this flag is already set in Dockerfile. The # remaining XLA flags are set here. TF_XLA_FLAGS = os.environ['TF_XLA_FLAGS'] # contains tf_xla_enable_lazy_compilation # we set tf_xla_async_io_level=0 for 2 reasons: 1) It turns out that XLA doesn't like # hvd.allreduce ops used in the custom train_step. Because of this issue, training never started. # 2) XLA doesn't like the tf.cond used in conditional mixing (model module). # remove async flag since it's obsolete #os.environ['TF_XLA_FLAGS'] = TF_XLA_FLAGS + " --tf_xla_auto_jit=1 --tf_xla_async_io_level=0" os.environ['TF_XLA_FLAGS'] = TF_XLA_FLAGS + " --tf_xla_auto_jit=1" os.environ['TF_EXTRA_PTXAS_OPTIONS'] = "-sw200428197=true" tf.keras.backend.clear_session() tf.config.optimizer.set_jit(True) gpus = tf.config.experimental.list_physical_devices('GPU') tf.config.experimental.set_visible_devices(gpus, 'GPU') if params.memory_limit: for gpu in gpus: tf.config.experimental.set_virtual_device_configuration(gpu, [ tf.config.experimental.VirtualDeviceConfiguration(memory_limit=params.memory_limit)]) else: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) assert tf.config.experimental.get_memory_growth(gpu) if gpus: tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU') np.random.seed(params.seed) tf.random.set_seed(params.seed) if params.use_amp: # Model.compile will automatically wrap an optimizer with a tf.keras.mixed_precision.LossScaleOptimizer # if you use the 'mixed_float16' policy. If you use a custom training loop instead of calling Model.compile, # you should explicitly use a tf.keras.mixed_precision.LossScaleOptimizer to avoid numeric underflow with float16. policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16', loss_scale='dynamic') tf.keras.mixed_precision.experimental.set_policy(policy) else: os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '0'
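For reference, a hypothetical minimal invocation of `set_flags`; the attribute names mirror the ones the function reads above, the values are placeholders, and `hvd.init()` must already have been called because the function pins the visible GPU with `hvd.local_rank()`.

```python
from types import SimpleNamespace

import horovod.tensorflow as hvd

hvd.init()  # required before set_flags: it selects the GPU via hvd.local_rank()

params = SimpleNamespace(
    intraop_threads=None,  # keep TensorFlow's default thread counts
    interop_threads=None,
    use_xla=False,         # when True, set_flags extends TF_XLA_FLAGS from the environment
    memory_limit=None,     # None -> enable memory growth instead of a fixed per-GPU limit
    seed=42,
    use_amp=True,          # switches Keras to the mixed_float16 policy
)

set_flags(params)
```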
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
ops_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.utils.ops.""" import numpy as np import tensorflow as tf from object_detection.core import standard_fields as fields from object_detection.utils import ops from object_detection.utils import test_case slim = tf.contrib.slim class NormalizedToImageCoordinatesTest(tf.test.TestCase): def test_normalized_to_image_coordinates(self): normalized_boxes = tf.placeholder(tf.float32, shape=(None, 1, 4)) normalized_boxes_np = np.array([[[0.0, 0.0, 1.0, 1.0]], [[0.5, 0.5, 1.0, 1.0]]]) image_shape = tf.convert_to_tensor([1, 4, 4, 3], dtype=tf.int32) absolute_boxes = ops.normalized_to_image_coordinates(normalized_boxes, image_shape, parallel_iterations=2) expected_boxes = np.array([[[0, 0, 4, 4]], [[2, 2, 4, 4]]]) with self.test_session() as sess: absolute_boxes = sess.run(absolute_boxes, feed_dict={normalized_boxes: normalized_boxes_np}) self.assertAllEqual(absolute_boxes, expected_boxes) class ReduceSumTrailingDimensions(tf.test.TestCase): def test_reduce_sum_trailing_dimensions(self): input_tensor = tf.placeholder(tf.float32, shape=[None, None, None]) reduced_tensor = ops.reduce_sum_trailing_dimensions(input_tensor, ndims=2) with self.test_session() as sess: reduced_np = sess.run(reduced_tensor, feed_dict={input_tensor: np.ones((2, 2, 2), np.float32)}) self.assertAllClose(reduced_np, 2 * np.ones((2, 2), np.float32)) class MeshgridTest(tf.test.TestCase): def test_meshgrid_numpy_comparison(self): """Tests meshgrid op with vectors, for which it should match numpy.""" x = np.arange(4) y = np.arange(6) exp_xgrid, exp_ygrid = np.meshgrid(x, y) xgrid, ygrid = ops.meshgrid(x, y) with self.test_session() as sess: xgrid_output, ygrid_output = sess.run([xgrid, ygrid]) self.assertAllEqual(xgrid_output, exp_xgrid) self.assertAllEqual(ygrid_output, exp_ygrid) def test_meshgrid_multidimensional(self): np.random.seed(18) x = np.random.rand(4, 1, 2).astype(np.float32) y = np.random.rand(2, 3).astype(np.float32) xgrid, ygrid = ops.meshgrid(x, y) grid_shape = list(y.shape) + list(x.shape) self.assertEqual(xgrid.get_shape().as_list(), grid_shape) self.assertEqual(ygrid.get_shape().as_list(), grid_shape) with self.test_session() as sess: xgrid_output, ygrid_output = sess.run([xgrid, ygrid]) # Check the shape of the output grids self.assertEqual(xgrid_output.shape, tuple(grid_shape)) self.assertEqual(ygrid_output.shape, tuple(grid_shape)) # Check a few elements test_elements = [((3, 0, 0), (1, 2)), ((2, 0, 1), (0, 0)), ((0, 0, 0), (1, 1))] for xind, yind in test_elements: # These are float equality tests, but the meshgrid op should not introduce # rounding. 
self.assertEqual(xgrid_output[yind + xind], x[xind]) self.assertEqual(ygrid_output[yind + xind], y[yind]) class OpsTestFixedPadding(tf.test.TestCase): def test_3x3_kernel(self): tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]]) padded_tensor = ops.fixed_padding(tensor, 3) with self.test_session() as sess: padded_tensor_out = sess.run(padded_tensor) self.assertEqual((1, 4, 4, 1), padded_tensor_out.shape) def test_5x5_kernel(self): tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]]) padded_tensor = ops.fixed_padding(tensor, 5) with self.test_session() as sess: padded_tensor_out = sess.run(padded_tensor) self.assertEqual((1, 6, 6, 1), padded_tensor_out.shape) def test_3x3_atrous_kernel(self): tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]]) padded_tensor = ops.fixed_padding(tensor, 3, 2) with self.test_session() as sess: padded_tensor_out = sess.run(padded_tensor) self.assertEqual((1, 6, 6, 1), padded_tensor_out.shape) class OpsTestPadToMultiple(tf.test.TestCase): def test_zero_padding(self): tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]]) padded_tensor = ops.pad_to_multiple(tensor, 1) with self.test_session() as sess: padded_tensor_out = sess.run(padded_tensor) self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape) def test_no_padding(self): tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]]) padded_tensor = ops.pad_to_multiple(tensor, 2) with self.test_session() as sess: padded_tensor_out = sess.run(padded_tensor) self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape) def test_non_square_padding(self): tensor = tf.constant([[[[0.], [0.]]]]) padded_tensor = ops.pad_to_multiple(tensor, 2) with self.test_session() as sess: padded_tensor_out = sess.run(padded_tensor) self.assertEqual((1, 2, 2, 1), padded_tensor_out.shape) def test_padding(self): tensor = tf.constant([[[[0.], [0.]], [[0.], [0.]]]]) padded_tensor = ops.pad_to_multiple(tensor, 4) with self.test_session() as sess: padded_tensor_out = sess.run(padded_tensor) self.assertEqual((1, 4, 4, 1), padded_tensor_out.shape) class OpsTestPaddedOneHotEncoding(tf.test.TestCase): def test_correct_one_hot_tensor_with_no_pad(self): indices = tf.constant([1, 2, 3, 5]) one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=6, left_pad=0) expected_tensor = np.array([[0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1]], np.float32) with self.test_session() as sess: out_one_hot_tensor = sess.run(one_hot_tensor) self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10, atol=1e-10) def test_correct_one_hot_tensor_with_pad_one(self): indices = tf.constant([1, 2, 3, 5]) one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=6, left_pad=1) expected_tensor = np.array([[0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1]], np.float32) with self.test_session() as sess: out_one_hot_tensor = sess.run(one_hot_tensor) self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10, atol=1e-10) def test_correct_one_hot_tensor_with_pad_three(self): indices = tf.constant([1, 2, 3, 5]) one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=6, left_pad=3) expected_tensor = np.array([[0, 0, 0, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 1]], np.float32) with self.test_session() as sess: out_one_hot_tensor = sess.run(one_hot_tensor) self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10, atol=1e-10) def test_correct_padded_one_hot_tensor_with_empty_indices(self): depth = 6 pad = 2 indices = 
tf.constant([]) one_hot_tensor = ops.padded_one_hot_encoding( indices, depth=depth, left_pad=pad) expected_tensor = np.zeros((0, depth + pad)) with self.test_session() as sess: out_one_hot_tensor = sess.run(one_hot_tensor) self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10, atol=1e-10) def test_return_none_on_zero_depth(self): indices = tf.constant([1, 2, 3, 4, 5]) one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=0, left_pad=2) self.assertEqual(one_hot_tensor, None) def test_raise_value_error_on_rank_two_input(self): indices = tf.constant(1.0, shape=(2, 3)) with self.assertRaises(ValueError): ops.padded_one_hot_encoding(indices, depth=6, left_pad=2) def test_raise_value_error_on_negative_pad(self): indices = tf.constant(1.0, shape=(2, 3)) with self.assertRaises(ValueError): ops.padded_one_hot_encoding(indices, depth=6, left_pad=-1) def test_raise_value_error_on_float_pad(self): indices = tf.constant(1.0, shape=(2, 3)) with self.assertRaises(ValueError): ops.padded_one_hot_encoding(indices, depth=6, left_pad=0.1) def test_raise_value_error_on_float_depth(self): indices = tf.constant(1.0, shape=(2, 3)) with self.assertRaises(ValueError): ops.padded_one_hot_encoding(indices, depth=0.1, left_pad=2) class OpsDenseToSparseBoxesTest(tf.test.TestCase): def test_return_all_boxes_when_all_input_boxes_are_valid(self): num_classes = 4 num_valid_boxes = 3 code_size = 4 dense_location_placeholder = tf.placeholder(tf.float32, shape=(num_valid_boxes, code_size)) dense_num_boxes_placeholder = tf.placeholder(tf.int32, shape=(num_classes)) box_locations, box_classes = ops.dense_to_sparse_boxes( dense_location_placeholder, dense_num_boxes_placeholder, num_classes) feed_dict = {dense_location_placeholder: np.random.uniform( size=[num_valid_boxes, code_size]), dense_num_boxes_placeholder: np.array([1, 0, 0, 2], dtype=np.int32)} expected_box_locations = feed_dict[dense_location_placeholder] expected_box_classses = np.array([0, 3, 3]) with self.test_session() as sess: box_locations, box_classes = sess.run([box_locations, box_classes], feed_dict=feed_dict) self.assertAllClose(box_locations, expected_box_locations, rtol=1e-6, atol=1e-6) self.assertAllEqual(box_classes, expected_box_classses) def test_return_only_valid_boxes_when_input_contains_invalid_boxes(self): num_classes = 4 num_valid_boxes = 3 num_boxes = 10 code_size = 4 dense_location_placeholder = tf.placeholder(tf.float32, shape=(num_boxes, code_size)) dense_num_boxes_placeholder = tf.placeholder(tf.int32, shape=(num_classes)) box_locations, box_classes = ops.dense_to_sparse_boxes( dense_location_placeholder, dense_num_boxes_placeholder, num_classes) feed_dict = {dense_location_placeholder: np.random.uniform( size=[num_boxes, code_size]), dense_num_boxes_placeholder: np.array([1, 0, 0, 2], dtype=np.int32)} expected_box_locations = (feed_dict[dense_location_placeholder] [:num_valid_boxes]) expected_box_classses = np.array([0, 3, 3]) with self.test_session() as sess: box_locations, box_classes = sess.run([box_locations, box_classes], feed_dict=feed_dict) self.assertAllClose(box_locations, expected_box_locations, rtol=1e-6, atol=1e-6) self.assertAllEqual(box_classes, expected_box_classses) class OpsTestIndicesToDenseVector(tf.test.TestCase): def test_indices_to_dense_vector(self): size = 10000 num_indices = np.random.randint(size) rand_indices = np.random.permutation(np.arange(size))[0:num_indices] expected_output = np.zeros(size, dtype=np.float32) expected_output[rand_indices] = 1. 
tf_rand_indices = tf.constant(rand_indices) indicator = ops.indices_to_dense_vector(tf_rand_indices, size) with self.test_session() as sess: output = sess.run(indicator) self.assertAllEqual(output, expected_output) self.assertEqual(output.dtype, expected_output.dtype) def test_indices_to_dense_vector_size_at_inference(self): size = 5000 num_indices = 250 all_indices = np.arange(size) rand_indices = np.random.permutation(all_indices)[0:num_indices] expected_output = np.zeros(size, dtype=np.float32) expected_output[rand_indices] = 1. tf_all_indices = tf.placeholder(tf.int32) tf_rand_indices = tf.constant(rand_indices) indicator = ops.indices_to_dense_vector(tf_rand_indices, tf.shape(tf_all_indices)[0]) feed_dict = {tf_all_indices: all_indices} with self.test_session() as sess: output = sess.run(indicator, feed_dict=feed_dict) self.assertAllEqual(output, expected_output) self.assertEqual(output.dtype, expected_output.dtype) def test_indices_to_dense_vector_int(self): size = 500 num_indices = 25 rand_indices = np.random.permutation(np.arange(size))[0:num_indices] expected_output = np.zeros(size, dtype=np.int64) expected_output[rand_indices] = 1 tf_rand_indices = tf.constant(rand_indices) indicator = ops.indices_to_dense_vector( tf_rand_indices, size, 1, dtype=tf.int64) with self.test_session() as sess: output = sess.run(indicator) self.assertAllEqual(output, expected_output) self.assertEqual(output.dtype, expected_output.dtype) def test_indices_to_dense_vector_custom_values(self): size = 100 num_indices = 10 rand_indices = np.random.permutation(np.arange(size))[0:num_indices] indices_value = np.random.rand(1) default_value = np.random.rand(1) expected_output = np.float32(np.ones(size) * default_value) expected_output[rand_indices] = indices_value tf_rand_indices = tf.constant(rand_indices) indicator = ops.indices_to_dense_vector( tf_rand_indices, size, indices_value=indices_value, default_value=default_value) with self.test_session() as sess: output = sess.run(indicator) self.assertAllClose(output, expected_output) self.assertEqual(output.dtype, expected_output.dtype) def test_indices_to_dense_vector_all_indices_as_input(self): size = 500 num_indices = 500 rand_indices = np.random.permutation(np.arange(size))[0:num_indices] expected_output = np.ones(size, dtype=np.float32) tf_rand_indices = tf.constant(rand_indices) indicator = ops.indices_to_dense_vector(tf_rand_indices, size) with self.test_session() as sess: output = sess.run(indicator) self.assertAllEqual(output, expected_output) self.assertEqual(output.dtype, expected_output.dtype) def test_indices_to_dense_vector_empty_indices_as_input(self): size = 500 rand_indices = [] expected_output = np.zeros(size, dtype=np.float32) tf_rand_indices = tf.constant(rand_indices) indicator = ops.indices_to_dense_vector(tf_rand_indices, size) with self.test_session() as sess: output = sess.run(indicator) self.assertAllEqual(output, expected_output) self.assertEqual(output.dtype, expected_output.dtype) class GroundtruthFilterTest(tf.test.TestCase): def test_filter_groundtruth(self): input_image = tf.placeholder(tf.float32, shape=(None, None, 3)) input_boxes = tf.placeholder(tf.float32, shape=(None, 4)) input_classes = tf.placeholder(tf.int32, shape=(None,)) input_is_crowd = tf.placeholder(tf.bool, shape=(None,)) input_area = tf.placeholder(tf.float32, shape=(None,)) input_difficult = tf.placeholder(tf.float32, shape=(None,)) input_label_types = tf.placeholder(tf.string, shape=(None,)) valid_indices = tf.placeholder(tf.int32, shape=(None,)) input_tensors = 
{ fields.InputDataFields.image: input_image, fields.InputDataFields.groundtruth_boxes: input_boxes, fields.InputDataFields.groundtruth_classes: input_classes, fields.InputDataFields.groundtruth_is_crowd: input_is_crowd, fields.InputDataFields.groundtruth_area: input_area, fields.InputDataFields.groundtruth_difficult: input_difficult, fields.InputDataFields.groundtruth_label_types: input_label_types } output_tensors = ops.retain_groundtruth(input_tensors, valid_indices) image_tensor = np.random.rand(224, 224, 3) feed_dict = { input_image: image_tensor, input_boxes: np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]], dtype=np.float), input_classes: np.array([1, 2], dtype=np.int32), input_is_crowd: np.array([False, True], dtype=np.bool), input_area: np.array([32, 48], dtype=np.float32), input_difficult: np.array([True, False], dtype=np.bool), input_label_types: np.array(['APPROPRIATE', 'INCORRECT'], dtype=np.string_), valid_indices: np.array([0], dtype=np.int32) } expected_tensors = { fields.InputDataFields.image: image_tensor, fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]], fields.InputDataFields.groundtruth_classes: [1], fields.InputDataFields.groundtruth_is_crowd: [False], fields.InputDataFields.groundtruth_area: [32], fields.InputDataFields.groundtruth_difficult: [True], fields.InputDataFields.groundtruth_label_types: ['APPROPRIATE'] } with self.test_session() as sess: output_tensors = sess.run(output_tensors, feed_dict=feed_dict) for key in [fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_area]: self.assertAllClose(expected_tensors[key], output_tensors[key]) for key in [fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_is_crowd, fields.InputDataFields.groundtruth_label_types]: self.assertAllEqual(expected_tensors[key], output_tensors[key]) def test_filter_with_missing_fields(self): input_boxes = tf.placeholder(tf.float32, shape=(None, 4)) input_classes = tf.placeholder(tf.int32, shape=(None,)) input_tensors = { fields.InputDataFields.groundtruth_boxes: input_boxes, fields.InputDataFields.groundtruth_classes: input_classes } valid_indices = tf.placeholder(tf.int32, shape=(None,)) feed_dict = { input_boxes: np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]], dtype=np.float), input_classes: np.array([1, 2], dtype=np.int32), valid_indices: np.array([0], dtype=np.int32) } expected_tensors = { fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]], fields.InputDataFields.groundtruth_classes: [1] } output_tensors = ops.retain_groundtruth(input_tensors, valid_indices) with self.test_session() as sess: output_tensors = sess.run(output_tensors, feed_dict=feed_dict) for key in [fields.InputDataFields.groundtruth_boxes]: self.assertAllClose(expected_tensors[key], output_tensors[key]) for key in [fields.InputDataFields.groundtruth_classes]: self.assertAllEqual(expected_tensors[key], output_tensors[key]) def test_filter_with_empty_fields(self): input_boxes = tf.placeholder(tf.float32, shape=(None, 4)) input_classes = tf.placeholder(tf.int32, shape=(None,)) input_is_crowd = tf.placeholder(tf.bool, shape=(None,)) input_area = tf.placeholder(tf.float32, shape=(None,)) input_difficult = tf.placeholder(tf.float32, shape=(None,)) valid_indices = tf.placeholder(tf.int32, shape=(None,)) input_tensors = { fields.InputDataFields.groundtruth_boxes: input_boxes, fields.InputDataFields.groundtruth_classes: input_classes, fields.InputDataFields.groundtruth_is_crowd: input_is_crowd, 
fields.InputDataFields.groundtruth_area: input_area, fields.InputDataFields.groundtruth_difficult: input_difficult } output_tensors = ops.retain_groundtruth(input_tensors, valid_indices) feed_dict = { input_boxes: np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]], dtype=np.float), input_classes: np.array([1, 2], dtype=np.int32), input_is_crowd: np.array([False, True], dtype=np.bool), input_area: np.array([], dtype=np.float32), input_difficult: np.array([], dtype=np.float32), valid_indices: np.array([0], dtype=np.int32) } expected_tensors = { fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]], fields.InputDataFields.groundtruth_classes: [1], fields.InputDataFields.groundtruth_is_crowd: [False], fields.InputDataFields.groundtruth_area: [], fields.InputDataFields.groundtruth_difficult: [] } with self.test_session() as sess: output_tensors = sess.run(output_tensors, feed_dict=feed_dict) for key in [fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_area]: self.assertAllClose(expected_tensors[key], output_tensors[key]) for key in [fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_is_crowd]: self.assertAllEqual(expected_tensors[key], output_tensors[key]) def test_filter_with_empty_groundtruth_boxes(self): input_boxes = tf.placeholder(tf.float32, shape=(None, 4)) input_classes = tf.placeholder(tf.int32, shape=(None,)) input_is_crowd = tf.placeholder(tf.bool, shape=(None,)) input_area = tf.placeholder(tf.float32, shape=(None,)) input_difficult = tf.placeholder(tf.float32, shape=(None,)) valid_indices = tf.placeholder(tf.int32, shape=(None,)) input_tensors = { fields.InputDataFields.groundtruth_boxes: input_boxes, fields.InputDataFields.groundtruth_classes: input_classes, fields.InputDataFields.groundtruth_is_crowd: input_is_crowd, fields.InputDataFields.groundtruth_area: input_area, fields.InputDataFields.groundtruth_difficult: input_difficult } output_tensors = ops.retain_groundtruth(input_tensors, valid_indices) feed_dict = { input_boxes: np.array([], dtype=np.float).reshape(0, 4), input_classes: np.array([], dtype=np.int32), input_is_crowd: np.array([], dtype=np.bool), input_area: np.array([], dtype=np.float32), input_difficult: np.array([], dtype=np.float32), valid_indices: np.array([], dtype=np.int32) } with self.test_session() as sess: output_tensors = sess.run(output_tensors, feed_dict=feed_dict) for key in input_tensors: if key == fields.InputDataFields.groundtruth_boxes: self.assertAllEqual([0, 4], output_tensors[key].shape) else: self.assertAllEqual([0], output_tensors[key].shape) class RetainGroundTruthWithPositiveClasses(tf.test.TestCase): def test_filter_groundtruth_with_positive_classes(self): input_image = tf.placeholder(tf.float32, shape=(None, None, 3)) input_boxes = tf.placeholder(tf.float32, shape=(None, 4)) input_classes = tf.placeholder(tf.int32, shape=(None,)) input_is_crowd = tf.placeholder(tf.bool, shape=(None,)) input_area = tf.placeholder(tf.float32, shape=(None,)) input_difficult = tf.placeholder(tf.float32, shape=(None,)) input_label_types = tf.placeholder(tf.string, shape=(None,)) valid_indices = tf.placeholder(tf.int32, shape=(None,)) input_tensors = { fields.InputDataFields.image: input_image, fields.InputDataFields.groundtruth_boxes: input_boxes, fields.InputDataFields.groundtruth_classes: input_classes, fields.InputDataFields.groundtruth_is_crowd: input_is_crowd, fields.InputDataFields.groundtruth_area: input_area, fields.InputDataFields.groundtruth_difficult: input_difficult, 
fields.InputDataFields.groundtruth_label_types: input_label_types } output_tensors = ops.retain_groundtruth_with_positive_classes(input_tensors) image_tensor = np.random.rand(224, 224, 3) feed_dict = { input_image: image_tensor, input_boxes: np.array([[0.2, 0.4, 0.1, 0.8], [0.2, 0.4, 1.0, 0.8]], dtype=np.float), input_classes: np.array([1, 0], dtype=np.int32), input_is_crowd: np.array([False, True], dtype=np.bool), input_area: np.array([32, 48], dtype=np.float32), input_difficult: np.array([True, False], dtype=np.bool), input_label_types: np.array(['APPROPRIATE', 'INCORRECT'], dtype=np.string_), valid_indices: np.array([0], dtype=np.int32) } expected_tensors = { fields.InputDataFields.image: image_tensor, fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]], fields.InputDataFields.groundtruth_classes: [1], fields.InputDataFields.groundtruth_is_crowd: [False], fields.InputDataFields.groundtruth_area: [32], fields.InputDataFields.groundtruth_difficult: [True], fields.InputDataFields.groundtruth_label_types: ['APPROPRIATE'] } with self.test_session() as sess: output_tensors = sess.run(output_tensors, feed_dict=feed_dict) for key in [fields.InputDataFields.image, fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_area]: self.assertAllClose(expected_tensors[key], output_tensors[key]) for key in [fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_is_crowd, fields.InputDataFields.groundtruth_label_types]: self.assertAllEqual(expected_tensors[key], output_tensors[key]) class ReplaceNaNGroundtruthLabelScoresWithOnes(tf.test.TestCase): def test_replace_nan_groundtruth_label_scores_with_ones(self): label_scores = tf.constant([np.nan, 1.0, np.nan]) output_tensor = ops.replace_nan_groundtruth_label_scores_with_ones( label_scores) expected_tensor = [1.0, 1.0, 1.0] with self.test_session(): output_tensor = output_tensor.eval() self.assertAllClose(expected_tensor, output_tensor) def test_input_equals_output_when_no_nans(self): input_label_scores = [0.5, 1.0, 1.0] label_scores_tensor = tf.constant(input_label_scores) output_label_scores = ops.replace_nan_groundtruth_label_scores_with_ones( label_scores_tensor) with self.test_session(): output_label_scores = output_label_scores.eval() self.assertAllClose(input_label_scores, output_label_scores) class GroundtruthFilterWithCrowdBoxesTest(tf.test.TestCase): def test_filter_groundtruth_with_crowd_boxes(self): input_tensors = { fields.InputDataFields.groundtruth_boxes: [[0.1, 0.2, 0.6, 0.8], [0.2, 0.4, 0.1, 0.8]], fields.InputDataFields.groundtruth_classes: [1, 2], fields.InputDataFields.groundtruth_is_crowd: [True, False], fields.InputDataFields.groundtruth_area: [100.0, 238.7] } expected_tensors = { fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]], fields.InputDataFields.groundtruth_classes: [2], fields.InputDataFields.groundtruth_is_crowd: [False], fields.InputDataFields.groundtruth_area: [238.7] } output_tensors = ops.filter_groundtruth_with_crowd_boxes( input_tensors) with self.test_session() as sess: output_tensors = sess.run(output_tensors) for key in [fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_area]: self.assertAllClose(expected_tensors[key], output_tensors[key]) for key in [fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_is_crowd]: self.assertAllEqual(expected_tensors[key], output_tensors[key]) class GroundtruthFilterWithNanBoxTest(tf.test.TestCase): def test_filter_groundtruth_with_nan_box_coordinates(self): 
input_tensors = { fields.InputDataFields.groundtruth_boxes: [[np.nan, np.nan, np.nan, np.nan], [0.2, 0.4, 0.1, 0.8]], fields.InputDataFields.groundtruth_classes: [1, 2], fields.InputDataFields.groundtruth_is_crowd: [False, True], fields.InputDataFields.groundtruth_area: [100.0, 238.7] } expected_tensors = { fields.InputDataFields.groundtruth_boxes: [[0.2, 0.4, 0.1, 0.8]], fields.InputDataFields.groundtruth_classes: [2], fields.InputDataFields.groundtruth_is_crowd: [True], fields.InputDataFields.groundtruth_area: [238.7] } output_tensors = ops.filter_groundtruth_with_nan_box_coordinates( input_tensors) with self.test_session() as sess: output_tensors = sess.run(output_tensors) for key in [fields.InputDataFields.groundtruth_boxes, fields.InputDataFields.groundtruth_area]: self.assertAllClose(expected_tensors[key], output_tensors[key]) for key in [fields.InputDataFields.groundtruth_classes, fields.InputDataFields.groundtruth_is_crowd]: self.assertAllEqual(expected_tensors[key], output_tensors[key]) class OpsTestNormalizeToTarget(tf.test.TestCase): def test_create_normalize_to_target(self): inputs = tf.random_uniform([5, 10, 12, 3]) target_norm_value = 4.0 dim = 3 with self.test_session(): output = ops.normalize_to_target(inputs, target_norm_value, dim) self.assertEqual(output.op.name, 'NormalizeToTarget/mul') var_name = tf.contrib.framework.get_variables()[0].name self.assertEqual(var_name, 'NormalizeToTarget/weights:0') def test_invalid_dim(self): inputs = tf.random_uniform([5, 10, 12, 3]) target_norm_value = 4.0 dim = 10 with self.assertRaisesRegexp( ValueError, 'dim must be non-negative but smaller than the input rank.'): ops.normalize_to_target(inputs, target_norm_value, dim) def test_invalid_target_norm_values(self): inputs = tf.random_uniform([5, 10, 12, 3]) target_norm_value = [4.0, 4.0] dim = 3 with self.assertRaisesRegexp( ValueError, 'target_norm_value must be a float or a list of floats'): ops.normalize_to_target(inputs, target_norm_value, dim) def test_correct_output_shape(self): inputs = tf.random_uniform([5, 10, 12, 3]) target_norm_value = 4.0 dim = 3 with self.test_session(): output = ops.normalize_to_target(inputs, target_norm_value, dim) self.assertEqual(output.get_shape().as_list(), inputs.get_shape().as_list()) def test_correct_initial_output_values(self): inputs = tf.constant([[[[3, 4], [7, 24]], [[5, -12], [-1, 0]]]], tf.float32) target_norm_value = 10.0 dim = 3 expected_output = [[[[30/5.0, 40/5.0], [70/25.0, 240/25.0]], [[50/13.0, -120/13.0], [-10, 0]]]] with self.test_session() as sess: normalized_inputs = ops.normalize_to_target(inputs, target_norm_value, dim) sess.run(tf.global_variables_initializer()) output = normalized_inputs.eval() self.assertAllClose(output, expected_output) def test_multiple_target_norm_values(self): inputs = tf.constant([[[[3, 4], [7, 24]], [[5, -12], [-1, 0]]]], tf.float32) target_norm_value = [10.0, 20.0] dim = 3 expected_output = [[[[30/5.0, 80/5.0], [70/25.0, 480/25.0]], [[50/13.0, -240/13.0], [-10, 0]]]] with self.test_session() as sess: normalized_inputs = ops.normalize_to_target(inputs, target_norm_value, dim) sess.run(tf.global_variables_initializer()) output = normalized_inputs.eval() self.assertAllClose(output, expected_output) class OpsTestPositionSensitiveCropRegions(tf.test.TestCase): def test_position_sensitive(self): num_spatial_bins = [3, 2] image_shape = [3, 2, 6] # First channel is 1's, second channel is 2's, etc. 
image = tf.constant(range(1, 3 * 2 + 1) * 6, dtype=tf.float32, shape=image_shape) boxes = tf.random_uniform((2, 4)) # The result for both boxes should be [[1, 2], [3, 4], [5, 6]] # before averaging. expected_output = np.array([3.5, 3.5]).reshape([2, 1, 1, 1]) for crop_size_mult in range(1, 3): crop_size = [3 * crop_size_mult, 2 * crop_size_mult] ps_crop_and_pool = ops.position_sensitive_crop_regions( image, boxes, crop_size, num_spatial_bins, global_pool=True) with self.test_session() as sess: output = sess.run(ps_crop_and_pool) self.assertAllClose(output, expected_output) def test_position_sensitive_with_equal_channels(self): num_spatial_bins = [2, 2] image_shape = [3, 3, 4] crop_size = [2, 2] image = tf.constant(range(1, 3 * 3 + 1), dtype=tf.float32, shape=[3, 3, 1]) tiled_image = tf.tile(image, [1, 1, image_shape[2]]) boxes = tf.random_uniform((3, 4)) box_ind = tf.constant([0, 0, 0], dtype=tf.int32) # All channels are equal so position-sensitive crop and resize should # work as the usual crop and resize for just one channel. crop = tf.image.crop_and_resize(tf.expand_dims(image, axis=0), boxes, box_ind, crop_size) crop_and_pool = tf.reduce_mean(crop, [1, 2], keep_dims=True) ps_crop_and_pool = ops.position_sensitive_crop_regions( tiled_image, boxes, crop_size, num_spatial_bins, global_pool=True) with self.test_session() as sess: expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool)) self.assertAllClose(output, expected_output) def test_raise_value_error_on_num_bins_less_than_one(self): num_spatial_bins = [1, -1] image_shape = [1, 1, 2] crop_size = [2, 2] image = tf.constant(1, dtype=tf.float32, shape=image_shape) boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32) with self.assertRaisesRegexp(ValueError, 'num_spatial_bins should be >= 1'): ops.position_sensitive_crop_regions( image, boxes, crop_size, num_spatial_bins, global_pool=True) def test_raise_value_error_on_non_divisible_crop_size(self): num_spatial_bins = [2, 3] image_shape = [1, 1, 6] crop_size = [3, 2] image = tf.constant(1, dtype=tf.float32, shape=image_shape) boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32) with self.assertRaisesRegexp( ValueError, 'crop_size should be divisible by num_spatial_bins'): ops.position_sensitive_crop_regions( image, boxes, crop_size, num_spatial_bins, global_pool=True) def test_raise_value_error_on_non_divisible_num_channels(self): num_spatial_bins = [2, 2] image_shape = [1, 1, 5] crop_size = [2, 2] image = tf.constant(1, dtype=tf.float32, shape=image_shape) boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32) with self.assertRaisesRegexp( ValueError, 'Dimension size must be evenly divisible by 4 but is 5'): ops.position_sensitive_crop_regions( image, boxes, crop_size, num_spatial_bins, global_pool=True) def test_position_sensitive_with_global_pool_false(self): num_spatial_bins = [3, 2] image_shape = [3, 2, 6] num_boxes = 2 # First channel is 1's, second channel is 2's, etc. image = tf.constant(range(1, 3 * 2 + 1) * 6, dtype=tf.float32, shape=image_shape) boxes = tf.random_uniform((num_boxes, 4)) expected_output = [] # Expected output, when crop_size = [3, 2]. expected_output.append(np.expand_dims( np.tile(np.array([[1, 2], [3, 4], [5, 6]]), (num_boxes, 1, 1)), axis=-1)) # Expected output, when crop_size = [6, 4]. 
expected_output.append(np.expand_dims( np.tile(np.array([[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4], [5, 5, 6, 6], [5, 5, 6, 6]]), (num_boxes, 1, 1)), axis=-1)) for crop_size_mult in range(1, 3): crop_size = [3 * crop_size_mult, 2 * crop_size_mult] ps_crop = ops.position_sensitive_crop_regions( image, boxes, crop_size, num_spatial_bins, global_pool=False) with self.test_session() as sess: output = sess.run(ps_crop) self.assertAllEqual(output, expected_output[crop_size_mult - 1]) def test_position_sensitive_with_global_pool_false_and_do_global_pool(self): num_spatial_bins = [3, 2] image_shape = [3, 2, 6] num_boxes = 2 # First channel is 1's, second channel is 2's, etc. image = tf.constant(range(1, 3 * 2 + 1) * 6, dtype=tf.float32, shape=image_shape) boxes = tf.random_uniform((num_boxes, 4)) expected_output = [] # Expected output, when crop_size = [3, 2]. expected_output.append(np.mean( np.expand_dims( np.tile(np.array([[1, 2], [3, 4], [5, 6]]), (num_boxes, 1, 1)), axis=-1), axis=(1, 2), keepdims=True)) # Expected output, when crop_size = [6, 4]. expected_output.append(np.mean( np.expand_dims( np.tile(np.array([[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4], [5, 5, 6, 6], [5, 5, 6, 6]]), (num_boxes, 1, 1)), axis=-1), axis=(1, 2), keepdims=True)) for crop_size_mult in range(1, 3): crop_size = [3 * crop_size_mult, 2 * crop_size_mult] # Perform global_pooling after running the function with # global_pool=False. ps_crop = ops.position_sensitive_crop_regions( image, boxes, crop_size, num_spatial_bins, global_pool=False) ps_crop_and_pool = tf.reduce_mean( ps_crop, reduction_indices=(1, 2), keep_dims=True) with self.test_session() as sess: output = sess.run(ps_crop_and_pool) self.assertAllEqual(output, expected_output[crop_size_mult - 1]) def test_raise_value_error_on_non_square_block_size(self): num_spatial_bins = [3, 2] image_shape = [3, 2, 6] crop_size = [6, 2] image = tf.constant(1, dtype=tf.float32, shape=image_shape) boxes = tf.constant([[0, 0, 1, 1]], dtype=tf.float32) with self.assertRaisesRegexp( ValueError, 'Only support square bin crop size for now.'): ops.position_sensitive_crop_regions( image, boxes, crop_size, num_spatial_bins, global_pool=False) class OpsTestBatchPositionSensitiveCropRegions(tf.test.TestCase): def test_position_sensitive_with_single_bin(self): num_spatial_bins = [1, 1] image_shape = [2, 3, 3, 4] crop_size = [2, 2] image = tf.random_uniform(image_shape) boxes = tf.random_uniform((2, 3, 4)) box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32) # When a single bin is used, position-sensitive crop and pool should be # the same as non-position sensitive crop and pool. crop = tf.image.crop_and_resize(image, tf.reshape(boxes, [-1, 4]), box_ind, crop_size) crop_and_pool = tf.reduce_mean(crop, [1, 2], keepdims=True) crop_and_pool = tf.reshape(crop_and_pool, [2, 3, 1, 1, 4]) ps_crop_and_pool = ops.batch_position_sensitive_crop_regions( image, boxes, crop_size, num_spatial_bins, global_pool=True) with self.test_session() as sess: expected_output, output = sess.run((crop_and_pool, ps_crop_and_pool)) self.assertAllClose(output, expected_output) def test_position_sensitive_with_global_pool_false_and_known_boxes(self): num_spatial_bins = [2, 2] image_shape = [2, 2, 2, 4] crop_size = [2, 2] images = tf.constant(range(1, 2 * 2 * 4 + 1) * 2, dtype=tf.float32, shape=image_shape) # First box contains whole image, and second box contains only first row. 
boxes = tf.constant(np.array([[[0., 0., 1., 1.]], [[0., 0., 0.5, 1.]]]), dtype=tf.float32) # box_ind = tf.constant([0, 1], dtype=tf.int32) expected_output = [] # Expected output, when the box containing whole image. expected_output.append( np.reshape(np.array([[4, 7], [10, 13]]), (1, 2, 2, 1)) ) # Expected output, when the box containing only first row. expected_output.append( np.reshape(np.array([[3, 6], [7, 10]]), (1, 2, 2, 1)) ) expected_output = np.stack(expected_output, axis=0) ps_crop = ops.batch_position_sensitive_crop_regions( images, boxes, crop_size, num_spatial_bins, global_pool=False) with self.test_session() as sess: output = sess.run(ps_crop) self.assertAllEqual(output, expected_output) def test_position_sensitive_with_global_pool_false_and_single_bin(self): num_spatial_bins = [1, 1] image_shape = [2, 3, 3, 4] crop_size = [1, 1] images = tf.random_uniform(image_shape) boxes = tf.random_uniform((2, 3, 4)) # box_ind = tf.constant([0, 0, 0, 1, 1, 1], dtype=tf.int32) # Since single_bin is used and crop_size = [1, 1] (i.e., no crop resize), # the outputs are the same whatever the global_pool value is. ps_crop_and_pool = ops.batch_position_sensitive_crop_regions( images, boxes, crop_size, num_spatial_bins, global_pool=True) ps_crop = ops.batch_position_sensitive_crop_regions( images, boxes, crop_size, num_spatial_bins, global_pool=False) with self.test_session() as sess: pooled_output, unpooled_output = sess.run((ps_crop_and_pool, ps_crop)) self.assertAllClose(pooled_output, unpooled_output) class ReframeBoxMasksToImageMasksTest(tf.test.TestCase): def testZeroImageOnEmptyMask(self): box_masks = tf.constant([[[0, 0], [0, 0]]], dtype=tf.float32) boxes = tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32) image_masks = ops.reframe_box_masks_to_image_masks(box_masks, boxes, image_height=4, image_width=4) np_expected_image_masks = np.array([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]], dtype=np.float32) with self.test_session() as sess: np_image_masks = sess.run(image_masks) self.assertAllClose(np_image_masks, np_expected_image_masks) def testZeroBoxMasks(self): box_masks = tf.zeros([0, 3, 3], dtype=tf.float32) boxes = tf.zeros([0, 4], dtype=tf.float32) image_masks = ops.reframe_box_masks_to_image_masks(box_masks, boxes, image_height=4, image_width=4) with self.test_session() as sess: np_image_masks = sess.run(image_masks) self.assertAllEqual(np_image_masks.shape, np.array([0, 4, 4])) def testMaskIsCenteredInImageWhenBoxIsCentered(self): box_masks = tf.constant([[[1, 1], [1, 1]]], dtype=tf.float32) boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]], dtype=tf.float32) image_masks = ops.reframe_box_masks_to_image_masks(box_masks, boxes, image_height=4, image_width=4) np_expected_image_masks = np.array([[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]]], dtype=np.float32) with self.test_session() as sess: np_image_masks = sess.run(image_masks) self.assertAllClose(np_image_masks, np_expected_image_masks) def testMaskOffCenterRemainsOffCenterInImage(self): box_masks = tf.constant([[[1, 0], [0, 1]]], dtype=tf.float32) boxes = tf.constant([[0.25, 0.5, 0.75, 1.0]], dtype=tf.float32) image_masks = ops.reframe_box_masks_to_image_masks(box_masks, boxes, image_height=4, image_width=4) np_expected_image_masks = np.array([[[0, 0, 0, 0], [0, 0, 0.6111111, 0.16666669], [0, 0, 0.3888889, 0.83333337], [0, 0, 0, 0]]], dtype=np.float32) with self.test_session() as sess: np_image_masks = sess.run(image_masks) self.assertAllClose(np_image_masks, np_expected_image_masks) class 
MergeBoxesWithMultipleLabelsTest(tf.test.TestCase): def testMergeBoxesWithMultipleLabels(self): boxes = tf.constant( [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75], [0.25, 0.25, 0.75, 0.75]], dtype=tf.float32) class_indices = tf.constant([0, 4, 2], dtype=tf.int32) class_confidences = tf.constant([0.8, 0.2, 0.1], dtype=tf.float32) num_classes = 5 merged_boxes, merged_classes, merged_confidences, merged_box_indices = ( ops.merge_boxes_with_multiple_labels( boxes, class_indices, class_confidences, num_classes)) expected_merged_boxes = np.array( [[0.25, 0.25, 0.75, 0.75], [0.0, 0.0, 0.5, 0.75]], dtype=np.float32) expected_merged_classes = np.array( [[1, 0, 1, 0, 0], [0, 0, 0, 0, 1]], dtype=np.int32) expected_merged_confidences = np.array( [[0.8, 0, 0.1, 0, 0], [0, 0, 0, 0, 0.2]], dtype=np.float32) expected_merged_box_indices = np.array([0, 1], dtype=np.int32) with self.test_session() as sess: (np_merged_boxes, np_merged_classes, np_merged_confidences, np_merged_box_indices) = sess.run( [merged_boxes, merged_classes, merged_confidences, merged_box_indices]) self.assertAllClose(np_merged_boxes, expected_merged_boxes) self.assertAllClose(np_merged_classes, expected_merged_classes) self.assertAllClose(np_merged_confidences, expected_merged_confidences) self.assertAllClose(np_merged_box_indices, expected_merged_box_indices) def testMergeBoxesWithMultipleLabelsCornerCase(self): boxes = tf.constant( [[0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 0, 1, 1], [0, 1, 1, 1], [0, 0, 1, 1]], dtype=tf.float32) class_indices = tf.constant([0, 1, 2, 3, 2, 1, 0, 3], dtype=tf.int32) class_confidences = tf.constant([0.1, 0.9, 0.2, 0.8, 0.3, 0.7, 0.4, 0.6], dtype=tf.float32) num_classes = 4 merged_boxes, merged_classes, merged_confidences, merged_box_indices = ( ops.merge_boxes_with_multiple_labels( boxes, class_indices, class_confidences, num_classes)) expected_merged_boxes = np.array( [[0, 0, 1, 1], [0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 1, 1]], dtype=np.float32) expected_merged_classes = np.array( [[1, 0, 0, 1], [1, 1, 0, 0], [0, 1, 1, 0], [0, 0, 1, 1]], dtype=np.int32) expected_merged_confidences = np.array( [[0.1, 0, 0, 0.6], [0.4, 0.9, 0, 0], [0, 0.7, 0.2, 0], [0, 0, 0.3, 0.8]], dtype=np.float32) expected_merged_box_indices = np.array([0, 1, 2, 3], dtype=np.int32) with self.test_session() as sess: (np_merged_boxes, np_merged_classes, np_merged_confidences, np_merged_box_indices) = sess.run( [merged_boxes, merged_classes, merged_confidences, merged_box_indices]) self.assertAllClose(np_merged_boxes, expected_merged_boxes) self.assertAllClose(np_merged_classes, expected_merged_classes) self.assertAllClose(np_merged_confidences, expected_merged_confidences) self.assertAllClose(np_merged_box_indices, expected_merged_box_indices) def testMergeBoxesWithEmptyInputs(self): boxes = tf.zeros([0, 4], dtype=tf.float32) class_indices = tf.constant([], dtype=tf.int32) class_confidences = tf.constant([], dtype=tf.float32) num_classes = 5 merged_boxes, merged_classes, merged_confidences, merged_box_indices = ( ops.merge_boxes_with_multiple_labels( boxes, class_indices, class_confidences, num_classes)) with self.test_session() as sess: (np_merged_boxes, np_merged_classes, np_merged_confidences, np_merged_box_indices) = sess.run( [merged_boxes, merged_classes, merged_confidences, merged_box_indices]) self.assertAllEqual(np_merged_boxes.shape, [0, 4]) self.assertAllEqual(np_merged_classes.shape, [0, 5]) self.assertAllEqual(np_merged_confidences.shape, [0, 5]) 
self.assertAllEqual(np_merged_box_indices.shape, [0]) class NearestNeighborUpsamplingTest(test_case.TestCase): def test_upsampling_with_single_scale(self): def graph_fn(inputs): custom_op_output = ops.nearest_neighbor_upsampling(inputs, scale=2) return custom_op_output inputs = np.reshape(np.arange(4).astype(np.float32), [1, 2, 2, 1]) custom_op_output = self.execute(graph_fn, [inputs]) expected_output = [[[[0], [0], [1], [1]], [[0], [0], [1], [1]], [[2], [2], [3], [3]], [[2], [2], [3], [3]]]] self.assertAllClose(custom_op_output, expected_output) def test_upsampling_with_separate_height_width_scales(self): def graph_fn(inputs): custom_op_output = ops.nearest_neighbor_upsampling(inputs, height_scale=2, width_scale=3) return custom_op_output inputs = np.reshape(np.arange(4).astype(np.float32), [1, 2, 2, 1]) custom_op_output = self.execute(graph_fn, [inputs]) expected_output = [[[[0], [0], [0], [1], [1], [1]], [[0], [0], [0], [1], [1], [1]], [[2], [2], [2], [3], [3], [3]], [[2], [2], [2], [3], [3], [3]]]] self.assertAllClose(custom_op_output, expected_output) class MatmulGatherOnZerothAxis(test_case.TestCase): def test_gather_2d(self): def graph_fn(params, indices): return ops.matmul_gather_on_zeroth_axis(params, indices) params = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [0, 1, 0, 0]], dtype=np.float32) indices = np.array([2, 2, 1], dtype=np.int32) expected_output = np.array([[9, 10, 11, 12], [9, 10, 11, 12], [5, 6, 7, 8]]) gather_output = self.execute(graph_fn, [params, indices]) self.assertAllClose(gather_output, expected_output) def test_gather_3d(self): def graph_fn(params, indices): return ops.matmul_gather_on_zeroth_axis(params, indices) params = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]], [[0, 1], [0, 0]]], dtype=np.float32) indices = np.array([0, 3, 1], dtype=np.int32) expected_output = np.array([[[1, 2], [3, 4]], [[0, 1], [0, 0]], [[5, 6], [7, 8]]]) gather_output = self.execute(graph_fn, [params, indices]) self.assertAllClose(gather_output, expected_output) def test_gather_with_many_indices(self): def graph_fn(params, indices): return ops.matmul_gather_on_zeroth_axis(params, indices) params = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [0, 1, 0, 0]], dtype=np.float32) indices = np.array([0, 0, 0, 0, 0, 0], dtype=np.int32) expected_output = np.array(6*[[1, 2, 3, 4]]) gather_output = self.execute(graph_fn, [params, indices]) self.assertAllClose(gather_output, expected_output) def test_gather_with_dynamic_shape_input(self): params_placeholder = tf.placeholder(tf.float32, shape=[None, 4]) indices_placeholder = tf.placeholder(tf.int32, shape=[None]) gather_result = ops.matmul_gather_on_zeroth_axis( params_placeholder, indices_placeholder) params = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [0, 1, 0, 0]], dtype=np.float32) indices = np.array([0, 0, 0, 0, 0, 0]) expected_output = np.array(6*[[1, 2, 3, 4]]) with self.test_session() as sess: gather_output = sess.run(gather_result, feed_dict={ params_placeholder: params, indices_placeholder: indices}) self.assertAllClose(gather_output, expected_output) class OpsTestMatMulCropAndResize(test_case.TestCase): def testMatMulCropAndResize2x2To1x1(self): def graph_fn(image, boxes): return ops.matmul_crop_and_resize(image, boxes, crop_size=[1, 1]) image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32) boxes = np.array([[[0, 0, 1, 1]]], dtype=np.float32) expected_output = [[[[[2.5]]]]] crop_output = self.execute(graph_fn, [image, boxes]) self.assertAllClose(crop_output, 
expected_output) def testMatMulCropAndResize2x2To1x1Flipped(self): def graph_fn(image, boxes): return ops.matmul_crop_and_resize(image, boxes, crop_size=[1, 1]) image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32) boxes = np.array([[[1, 1, 0, 0]]], dtype=np.float32) expected_output = [[[[[2.5]]]]] crop_output = self.execute(graph_fn, [image, boxes]) self.assertAllClose(crop_output, expected_output) def testMatMulCropAndResize2x2To3x3(self): def graph_fn(image, boxes): return ops.matmul_crop_and_resize(image, boxes, crop_size=[3, 3]) image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32) boxes = np.array([[[0, 0, 1, 1]]], dtype=np.float32) expected_output = [[[[[1.0], [1.5], [2.0]], [[2.0], [2.5], [3.0]], [[3.0], [3.5], [4.0]]]]] crop_output = self.execute(graph_fn, [image, boxes]) self.assertAllClose(crop_output, expected_output) def testMatMulCropAndResize2x2To3x3Flipped(self): def graph_fn(image, boxes): return ops.matmul_crop_and_resize(image, boxes, crop_size=[3, 3]) image = np.array([[[[1], [2]], [[3], [4]]]], dtype=np.float32) boxes = np.array([[[1, 1, 0, 0]]], dtype=np.float32) expected_output = [[[[[4.0], [3.5], [3.0]], [[3.0], [2.5], [2.0]], [[2.0], [1.5], [1.0]]]]] crop_output = self.execute(graph_fn, [image, boxes]) self.assertAllClose(crop_output, expected_output) def testMatMulCropAndResize3x3To2x2(self): def graph_fn(image, boxes): return ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2]) image = np.array([[[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]], dtype=np.float32) boxes = np.array([[[0, 0, 1, 1], [0, 0, .5, .5]]], dtype=np.float32) expected_output = [[[[[1], [3]], [[7], [9]]], [[[1], [2]], [[4], [5]]]]] crop_output = self.execute(graph_fn, [image, boxes]) self.assertAllClose(crop_output, expected_output) def testMatMulCropAndResize3x3To2x2_2Channels(self): def graph_fn(image, boxes): return ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2]) image = np.array([[[[1, 0], [2, 1], [3, 2]], [[4, 3], [5, 4], [6, 5]], [[7, 6], [8, 7], [9, 8]]]], dtype=np.float32) boxes = np.array([[[0, 0, 1, 1], [0, 0, .5, .5]]], dtype=np.float32) expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]], [[[1, 0], [2, 1]], [[4, 3], [5, 4]]]]] crop_output = self.execute(graph_fn, [image, boxes]) self.assertAllClose(crop_output, expected_output) def testBatchMatMulCropAndResize3x3To2x2_2Channels(self): def graph_fn(image, boxes): return ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2]) image = np.array([[[[1, 0], [2, 1], [3, 2]], [[4, 3], [5, 4], [6, 5]], [[7, 6], [8, 7], [9, 8]]], [[[1, 0], [2, 1], [3, 2]], [[4, 3], [5, 4], [6, 5]], [[7, 6], [8, 7], [9, 8]]]], dtype=np.float32) boxes = np.array([[[0, 0, 1, 1], [0, 0, .5, .5]], [[1, 1, 0, 0], [.5, .5, 0, 0]]], dtype=np.float32) expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]], [[[1, 0], [2, 1]], [[4, 3], [5, 4]]]], [[[[9, 8], [7, 6]], [[3, 2], [1, 0]]], [[[5, 4], [4, 3]], [[2, 1], [1, 0]]]]] crop_output = self.execute(graph_fn, [image, boxes]) self.assertAllClose(crop_output, expected_output) def testMatMulCropAndResize3x3To2x2Flipped(self): def graph_fn(image, boxes): return ops.matmul_crop_and_resize(image, boxes, crop_size=[2, 2]) image = np.array([[[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]], dtype=np.float32) boxes = np.array([[[1, 1, 0, 0], [.5, .5, 0, 0]]], dtype=np.float32) expected_output = [[[[[9], [7]], [[3], [1]]], [[[5], [4]], [[2], [1]]]]] crop_output = self.execute(graph_fn, [image, boxes]) self.assertAllClose(crop_output, expected_output) def 
testInvalidInputShape(self): image = tf.constant([[[1], [2]], [[3], [4]]], dtype=tf.float32) boxes = tf.constant([[-1, -1, 1, 1]], dtype=tf.float32) crop_size = [4, 4] with self.assertRaises(ValueError): _ = ops.matmul_crop_and_resize(image, boxes, crop_size) class OpsTestCropAndResize(test_case.TestCase): def testBatchCropAndResize3x3To2x2_2Channels(self): def graph_fn(image, boxes): return ops.native_crop_and_resize(image, boxes, crop_size=[2, 2]) image = np.array([[[[1, 0], [2, 1], [3, 2]], [[4, 3], [5, 4], [6, 5]], [[7, 6], [8, 7], [9, 8]]], [[[1, 0], [2, 1], [3, 2]], [[4, 3], [5, 4], [6, 5]], [[7, 6], [8, 7], [9, 8]]]], dtype=np.float32) boxes = np.array([[[0, 0, 1, 1], [0, 0, .5, .5]], [[1, 1, 0, 0], [.5, .5, 0, 0]]], dtype=np.float32) expected_output = [[[[[1, 0], [3, 2]], [[7, 6], [9, 8]]], [[[1, 0], [2, 1]], [[4, 3], [5, 4]]]], [[[[9, 8], [7, 6]], [[3, 2], [1, 0]]], [[[5, 4], [4, 3]], [[2, 1], [1, 0]]]]] crop_output = self.execute_cpu(graph_fn, [image, boxes]) self.assertAllClose(crop_output, expected_output) if __name__ == '__main__': tf.test.main()
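As a reading aid for the IndicesToDenseVector tests above, the same contract restated in plain NumPy — an illustration of the expected behaviour only, not the implementation in ops.py: a dense vector filled with default_value, with indices_value scattered at the given indices.

import numpy as np

def indices_to_dense_vector_np(indices, size, indices_value=1.0, default_value=0.0):
    # Dense vector of `default_value` with `indices_value` written at `indices`,
    # matching the expected outputs constructed in the tests above.
    out = np.full(size, default_value, dtype=np.float32)
    out[np.asarray(indices, dtype=np.int64)] = indices_value
    return out

assert (indices_to_dense_vector_np([2, 5], 8)
        == np.array([0, 0, 1, 0, 0, 1, 0, 0], np.float32)).all()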
TensorFlow/Detection/SSD/models/research/object_detection/models
models
faster_rcnn_inception_resnet_v2_feature_extractor_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for models.faster_rcnn_inception_resnet_v2_feature_extractor.""" import tensorflow as tf from object_detection.models import faster_rcnn_inception_resnet_v2_feature_extractor as frcnn_inc_res class FasterRcnnInceptionResnetV2FeatureExtractorTest(tf.test.TestCase): def _build_feature_extractor(self, first_stage_features_stride): return frcnn_inc_res.FasterRCNNInceptionResnetV2FeatureExtractor( is_training=False, first_stage_features_stride=first_stage_features_stride, batch_norm_trainable=False, reuse_weights=None, weight_decay=0.0) def test_extract_proposal_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 299, 299, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 19, 19, 1088]) def test_extract_proposal_features_stride_eight(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=8) preprocessed_inputs = tf.random_uniform( [1, 224, 224, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 28, 28, 1088]) def test_extract_proposal_features_half_size_input(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [1, 112, 112, 3], maxval=255, dtype=tf.float32) rpn_feature_map, _ = feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') features_shape = tf.shape(rpn_feature_map) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [1, 7, 7, 1088]) def test_extract_proposal_features_dies_on_invalid_stride(self): with self.assertRaises(ValueError): self._build_feature_extractor(first_stage_features_stride=99) def test_extract_proposal_features_dies_with_incorrect_rank_inputs(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) preprocessed_inputs = tf.random_uniform( [224, 224, 3], maxval=255, dtype=tf.float32) with self.assertRaises(ValueError): feature_extractor.extract_proposal_features( preprocessed_inputs, scope='TestScope') def 
test_extract_box_classifier_features_returns_expected_size(self): feature_extractor = self._build_feature_extractor( first_stage_features_stride=16) proposal_feature_maps = tf.random_uniform( [2, 17, 17, 1088], maxval=255, dtype=tf.float32) proposal_classifier_features = ( feature_extractor.extract_box_classifier_features( proposal_feature_maps, scope='TestScope')) features_shape = tf.shape(proposal_classifier_features) init_op = tf.global_variables_initializer() with self.test_session() as sess: sess.run(init_op) features_shape_out = sess.run(features_shape) self.assertAllEqual(features_shape_out, [2, 8, 8, 1536]) if __name__ == '__main__': tf.test.main()
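The file can be run standalone, e.g. python object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor_test.py (assuming the research and slim directories are on PYTHONPATH, as in the standard object_detection setup); the tf.test.main() guard above then discovers and runs every test method.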
PyTorch/Segmentation/MaskRCNN/pytorch/scripts/docker
docker
interactive
#!/bin/bash

PATH_TO_COCO=$1
MOUNT_LOCATION='/datasets/data'
NAME='maskrcnn_interactive'

docker run --runtime=nvidia -v $PATH_TO_COCO:/$MOUNT_LOCATION --rm --name=$NAME --shm-size=10g --ulimit memlock=-1 --ulimit stack=67108864 --ipc=host -t -i nvidia_joc_maskrcnn_pt bash
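Typical invocation, assuming the file is saved as scripts/docker/interactive.sh and the nvidia_joc_maskrcnn_pt image has already been built: bash scripts/docker/interactive.sh /path/to/coco. The first argument is bind-mounted at /datasets/data inside the container, which then starts an interactive bash shell.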
TensorFlow/LanguageModeling/BERT/scripts
scripts
finetune_inference_benchmark
#!/bin/bash

# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

task=${1:-"squad"}

# Edit to save logs & checkpoints in a different directory
RESULTS_DIR=/results

if [ ! -d "$RESULTS_DIR" ] ; then
   echo "Error! $RESULTS_DIR directory missing."
   exit -1
fi

echo "Results directory set as " $RESULTS_DIR

LOGFILE="${RESULTS_DIR}/${task}_inference_benchmark_bert_${bert_model}.log"
tmp_file="/tmp/${task}_inference_benchmark.log"

if [ "$task" = "squad" ] ; then
    export SQUAD_DIR=data/download/squad/v1.1
    echo "Squad directory set as " $SQUAD_DIR

    echo "Inference performance benchmarking for BERT $bert_model from $BERT_DIR" >> $LOGFILE

    for bert_model in "base" "large"; do
        echo "Model Sequence-Length Batch-size Precision Throughput-Average(sent/sec) Latency-Average(ms) Latency-50%(ms) Latency-90%(ms) Latency-95%(ms) Latency-99%(ms) Latency-100%(ms)" >> $LOGFILE

        if [ "$bert_model" = "large" ] ; then
            export BERT_DIR=data/download/nvidia_pretrained/bert_tf_pretraining_large_lamb
        else
            export BERT_DIR=data/download/nvidia_pretrained/bert_tf_squad11_base_128
        fi
        echo "BERT directory set as " $BERT_DIR
        init_checkpoint="$BERT_DIR/model.ckpt"

        for seq_len in 128 384; do
            for bs in 1 2 4 8; do
                for use_fp16 in "--amp" "--noamp"; do
                    python run_squad.py \
                        --vocab_file=$BERT_DIR/vocab.txt \
                        --bert_config_file=$BERT_DIR/bert_config.json \
                        --init_checkpoint=$init_checkpoint \
                        --do_predict=True \
                        --predict_file=$SQUAD_DIR/dev-v1.1.json \
                        --predict_batch_size=$bs \
                        --max_seq_length=$seq_len \
                        --doc_stride=128 \
                        --output_dir=${RESULTS_DIR} \
                        "$use_fp16" \
                        --use_xla --num_eval_iterations=1024 |& tee $tmp_file

                    perf=`cat $tmp_file | grep -F 'INFO:tensorflow:Throughput Average (sentences/sec) =' | tail -1 | awk -F'= ' '{print $2}'`
                    la=`cat $tmp_file | grep -F 'INFO:tensorflow:Latency Average (ms)' | awk -F'= ' '{print $2}'`
                    l50=`cat $tmp_file | grep -F 'INFO:tensorflow:Latency Confidence Level 50 (ms)' | awk -F'= ' '{print $2}'`
                    l90=`cat $tmp_file | grep -F 'INFO:tensorflow:Latency Confidence Level 90 (ms)' | awk -F'= ' '{print $2}'`
                    l95=`cat $tmp_file | grep -F 'INFO:tensorflow:Latency Confidence Level 95 (ms)' | awk -F'= ' '{print $2}'`
                    l99=`cat $tmp_file | grep -F 'INFO:tensorflow:Latency Confidence Level 99 (ms)' | awk -F'= ' '{print $2}'`
                    l100=`cat $tmp_file | grep -F 'INFO:tensorflow:Latency Confidence Level 100 (ms)' | awk -F'= ' '{print $2}'`

                    echo "$bert_model $seq_len $bs $use_fp16 $perf $la $l50 $l90 $l95 $l99 $l100" >> $LOGFILE
                done
            done
        done
    done
else
    echo "Benchmarking for " $task "currently not supported. Sorry!"
fi
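Typical invocation, assuming the script is launched from the BERT repository root inside the training container so that run_squad.py and the data/download tree resolve: bash scripts/finetune_inference_benchmark.sh squad. For every combination of model size (base, large), sequence length (128, 384), batch size (1, 2, 4, 8) and precision flag (--amp, --noamp) it appends one whitespace-separated result row to the log file under /results.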
TensorFlow2/Segmentation/nnUNet/data_preprocessing
data_preprocessing
preprocessor
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import json import math import os import pickle from pathlib import Path import nibabel import numpy as np from joblib import Parallel, delayed from runtime.utils import get_task_code, make_empty_dir from skimage.transform import resize from data_preprocessing import configs, transforms class Preprocessor: def __init__(self, args): self.args = args self.ct_min = 0 self.ct_max = 0 self.ct_mean = 0 self.ct_std = 0 self.target_spacing = None self.task = args.task self.task_code = get_task_code(args) self.patch_size = configs.patch_size[self.task_code] self.training = args.exec_mode == "training" self.data_path = args.data / configs.task[args.task] self.results = args.results / self.task_code if not self.training: self.results /= self.args.exec_mode metadata_path = self.data_path / "dataset.json" if self.args.exec_mode == "val": dataset_json = json.load(open(metadata_path, "r")) dataset_json["val"] = dataset_json["training"] with open(metadata_path, "w") as outfile: json.dump(dataset_json, outfile) self.metadata = json.load(open(metadata_path, "r")) self.modality = self.metadata["modality"]["0"] def run(self): print(f"Preprocessing {self.data_path}") make_empty_dir(self.results, force=self.args.force) if self.task_code in configs.spacings: self.target_spacing = configs.spacings[self.task_code] else: self.collect_spacings() print(f"Target spacing {self.target_spacing}") if self.modality == "CT": try: self.ct_min = configs.ct_min[self.task] self.ct_max = configs.ct_max[self.task] self.ct_mean = configs.ct_mean[self.task] self.ct_std = configs.ct_std[self.task] except KeyError: self.collect_intensities() _mean = round(self.ct_mean, 2) _std = round(self.ct_std, 2) print(f"[CT] min: {self.ct_min}, max: {self.ct_max}, mean: {_mean}, std: {_std}") self.run_parallel(self.preprocess_pair, self.args.exec_mode) pickle.dump( { "patch_size": self.patch_size, "spacings": self.target_spacing, "n_class": len(self.metadata["labels"]), "in_channels": len(self.metadata["modality"]), }, open(os.path.join(self.results, "config.pkl"), "wb"), ) def preprocess_pair(self, pair): fname = os.path.basename(pair["image"] if isinstance(pair, dict) else pair) image, label, image_spacings = self.load_pair(pair) original_size = image.shape[1:] image, label, bbox = transforms.crop_foreground(image, label) test_metadata = np.vstack([bbox, original_size]) if not self.training else None if self.args.dim == 3: image, label = self.resample(image, label, image_spacings) if self.modality == "CT": image = np.clip(image, self.ct_min, self.ct_max) image = self.normalize(image) if self.training: image, label = self.standardize(image, label) image, label = np.transpose(image, (1, 2, 3, 0)), np.transpose(label, (1, 2, 3, 0)) self.save(image, label, fname, test_metadata) def resample(self, image, label, image_spacings): if self.target_spacing != image_spacings: image, label = self.resample_pair(image, label, image_spacings) return 
image, label def standardize(self, image, label): pad_shape = self.calculate_pad_shape(image) image_shape = image.shape[1:] if pad_shape != image_shape: paddings = [(pad_sh - image_sh) / 2 for (pad_sh, image_sh) in zip(pad_shape, image_shape)] image = self.pad(image, paddings) label = self.pad(label, paddings) if self.args.dim == 2: # Center cropping 2D images. _, _, height, weight = image.shape start_h = (height - self.patch_size[0]) // 2 start_w = (weight - self.patch_size[1]) // 2 image = image[:, :, start_h : start_h + self.patch_size[0], start_w : start_w + self.patch_size[1]] label = label[:, :, start_h : start_h + self.patch_size[0], start_w : start_w + self.patch_size[1]] return image, label def normalize(self, image): if self.modality == "CT": return (image - self.ct_mean) / self.ct_std return transforms.normalize_intensity(image, nonzero=True, channel_wise=True) def save(self, image, label, fname, test_metadata): mean, std = np.round(np.mean(image, (0, 1, 2)), 2), np.round(np.std(image, (0, 1, 2)), 2) print(f"Saving {fname} shape {image.shape} mean {mean} std {std}") self.save_npy(image, fname, "_x.npy") if label is not None: self.save_npy(label, fname, "_y.npy") if test_metadata is not None: self.save_npy(test_metadata, fname, "_meta.npy") def load_pair(self, pair): image = self.load_nifty(pair["image"] if isinstance(pair, dict) else pair) image_spacing = self.load_spacing(image) image = image.get_fdata().astype(np.float32) image = self.standardize_layout(image) if self.training: label = self.load_nifty(pair["label"]).get_fdata().astype(np.uint8) label = self.standardize_layout(label) else: label = None return image, label, image_spacing def resample_pair(self, image, label, spacing): shape = self.calculate_new_shape(spacing, image.shape[1:]) if self.check_anisotrophy(spacing): image = self.resample_anisotrophic_image(image, shape) if label is not None: label = self.resample_anisotrophic_label(label, shape) else: image = self.resample_regular_image(image, shape) if label is not None: label = self.resample_regular_label(label, shape) image = image.astype(np.float32) if label is not None: label = label.astype(np.uint8) return image, label def calculate_pad_shape(self, image): min_shape = self.patch_size[:] image_shape = image.shape[1:] if len(min_shape) == 2: # In 2D case we don't want to pad depth axis. 
min_shape.insert(0, image_shape[0]) pad_shape = [max(mshape, ishape) for mshape, ishape in zip(min_shape, image_shape)] return pad_shape def get_intensities(self, pair): image = self.load_nifty(pair["image"]).get_fdata().astype(np.float32) label = self.load_nifty(pair["label"]).get_fdata().astype(np.uint8) foreground_idx = np.where(label > 0) intensities = image[foreground_idx].tolist() return intensities def collect_intensities(self): intensities = self.run_parallel(self.get_intensities, "training") intensities = list(itertools.chain.from_iterable(intensities)) self.ct_min, self.ct_max = np.percentile(intensities, [0.5, 99.5]) self.ct_mean, self.ct_std = np.mean(intensities), np.std(intensities) def get_spacing(self, pair): image = nibabel.load(self.data_path / pair["image"]) spacing = self.load_spacing(image) return spacing def collect_spacings(self): spacing = self.run_parallel(self.get_spacing, "training") spacing = np.array(spacing) target_spacing = np.median(spacing, axis=0) if max(target_spacing) / min(target_spacing) >= 3: lowres_axis = np.argmin(target_spacing) target_spacing[lowres_axis] = np.percentile(spacing[:, lowres_axis], 10) self.target_spacing = list(target_spacing) def check_anisotrophy(self, spacing): def check(spacing): return np.max(spacing) / np.min(spacing) >= 3 return check(spacing) or check(self.target_spacing) def calculate_new_shape(self, spacing, shape): spacing_ratio = np.array(spacing) / np.array(self.target_spacing) new_shape = (spacing_ratio * np.array(shape)).astype(int).tolist() return new_shape def save_npy(self, image, fname, suffix): np.save(os.path.join(self.results, fname.replace(".nii.gz", suffix)), image, allow_pickle=False) def run_parallel(self, func, exec_mode): return Parallel(n_jobs=self.args.n_jobs)(delayed(func)(pair) for pair in self.metadata[exec_mode]) def load_nifty(self, fname): return nibabel.load(os.path.join(self.data_path, fname)) @staticmethod def load_spacing(image): return image.header["pixdim"][1:4].tolist()[::-1] @staticmethod def pad(image, padding): pad_d, pad_w, pad_h = padding return np.pad( image, ( (0, 0), (math.floor(pad_d), math.ceil(pad_d)), (math.floor(pad_w), math.ceil(pad_w)), (math.floor(pad_h), math.ceil(pad_h)), ), ) @staticmethod def standardize_layout(data): if len(data.shape) == 3: data = np.expand_dims(data, 3) return np.transpose(data, (3, 2, 1, 0)) @staticmethod def resize_fn(image, shape, order, mode): return resize(image, shape, order=order, mode=mode, cval=0, clip=True, anti_aliasing=False) def resample_anisotrophic_image(self, image, shape): resized_channels = [] for image_c in image: resized = [self.resize_fn(i, shape[1:], 3, "edge") for i in image_c] resized = np.stack(resized, axis=0) resized = self.resize_fn(resized, shape, 0, "constant") resized_channels.append(resized) resized = np.stack(resized_channels, axis=0) return resized def resample_regular_image(self, image, shape): resized_channels = [] for image_c in image: resized_channels.append(self.resize_fn(image_c, shape, 3, "edge")) resized = np.stack(resized_channels, axis=0) return resized def resample_anisotrophic_label(self, label, shape): depth = label.shape[1] reshaped = np.zeros(shape, dtype=np.uint8) shape_2d = shape[1:] reshaped_2d = np.zeros((depth, *shape_2d), dtype=np.uint8) n_class = np.max(label) for class_ in range(1, n_class + 1): for depth_ in range(depth): mask = label[0, depth_] == class_ resized_2d = self.resize_fn(mask.astype(float), shape_2d, 1, "edge") reshaped_2d[depth_][resized_2d >= 0.5] = class_ for class_ in range(1, 
n_class + 1): mask = reshaped_2d == class_ resized = self.resize_fn(mask.astype(float), shape, 0, "constant") reshaped[resized >= 0.5] = class_ reshaped = np.expand_dims(reshaped, 0) return reshaped def resample_regular_label(self, label, shape): reshaped = np.zeros(shape, dtype=np.uint8) n_class = np.max(label) for class_ in range(1, n_class + 1): mask = label[0] == class_ resized = self.resize_fn(mask.astype(float), shape, 1, "edge") reshaped[resized >= 0.5] = class_ reshaped = np.expand_dims(reshaped, 0) return reshaped
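The `Preprocessor` above is driven entirely by an argparse-style `args` object. The sketch below shows one plausible way to invoke it; the module path, directory paths, task id, and worker count are illustrative assumptions, not values taken from the repository's launcher.

```python
# Hypothetical invocation of the Preprocessor defined above; every value here
# (paths, task id, worker count) is a placeholder, not a repository default.
from argparse import Namespace
from pathlib import Path

from data_preprocessing.preprocessor import Preprocessor  # assumed module location

args = Namespace(
    data=Path("/data"),        # root folder holding the raw task data and dataset.json
    results=Path("/results"),  # output folder for the preprocessed .npy files
    task="01",                 # task id used to look up configs.task / configs.spacings
    dim=3,                     # 3 -> volumetric resampling, 2 -> slice-wise center crops
    exec_mode="training",      # "training", "val" or "test"
    force=True,                # let make_empty_dir clear an existing results folder
    n_jobs=8,                  # parallel workers passed to joblib.Parallel
)

Preprocessor(args).run()
```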
.
DeepLearningExamples
.gitignore
repos.cfg
repos_init.cfg
nvtool*
TensorFlow/Detection/SSD/models/research/object_detection/utils
utils
label_map_util
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Label map utility functions.""" import logging import tensorflow as tf from google.protobuf import text_format from object_detection.protos import string_int_label_map_pb2 def _validate_label_map(label_map): """Checks if a label map is valid. Args: label_map: StringIntLabelMap to validate. Raises: ValueError: if label map is invalid. """ for item in label_map.item: if item.id < 0: raise ValueError('Label map ids should be >= 0.') if (item.id == 0 and item.name != 'background' and item.display_name != 'background'): raise ValueError('Label map id 0 is reserved for the background label') def create_category_index(categories): """Creates dictionary of COCO compatible categories keyed by category id. Args: categories: a list of dicts, each of which has the following keys: 'id': (required) an integer id uniquely identifying this category. 'name': (required) string representing category name e.g., 'cat', 'dog', 'pizza'. Returns: category_index: a dict containing the same entries as categories, but keyed by the 'id' field of each category. """ category_index = {} for cat in categories: category_index[cat['id']] = cat return category_index def get_max_label_map_index(label_map): """Get maximum index in label map. Args: label_map: a StringIntLabelMapProto Returns: an integer """ return max([item.id for item in label_map.item]) def convert_label_map_to_categories(label_map, max_num_classes, use_display_name=True): """Given label map proto returns categories list compatible with eval. This function converts label map proto and returns a list of dicts, each of which has the following keys: 'id': (required) an integer id uniquely identifying this category. 'name': (required) string representing category name e.g., 'cat', 'dog', 'pizza'. We only allow class into the list if its id-label_id_offset is between 0 (inclusive) and max_num_classes (exclusive). If there are several items mapping to the same id in the label map, we will only keep the first one in the categories list. Args: label_map: a StringIntLabelMapProto or None. If None, a default categories list is created with max_num_classes categories. max_num_classes: maximum number of (consecutive) label indices to include. use_display_name: (boolean) choose whether to load 'display_name' field as category name. If False or if the display_name field does not exist, uses 'name' field as category names instead. Returns: categories: a list of dictionaries representing all possible categories. 
""" categories = [] list_of_ids_already_added = [] if not label_map: label_id_offset = 1 for class_id in range(max_num_classes): categories.append({ 'id': class_id + label_id_offset, 'name': 'category_{}'.format(class_id + label_id_offset) }) return categories for item in label_map.item: if not 0 < item.id <= max_num_classes: logging.info( 'Ignore item %d since it falls outside of requested ' 'label range.', item.id) continue if use_display_name and item.HasField('display_name'): name = item.display_name else: name = item.name if item.id not in list_of_ids_already_added: list_of_ids_already_added.append(item.id) categories.append({'id': item.id, 'name': name}) return categories def load_labelmap(path): """Loads label map proto. Args: path: path to StringIntLabelMap proto text file. Returns: a StringIntLabelMapProto """ with tf.gfile.GFile(path, 'r') as fid: label_map_string = fid.read() label_map = string_int_label_map_pb2.StringIntLabelMap() try: text_format.Merge(label_map_string, label_map) except text_format.ParseError: label_map.ParseFromString(label_map_string) _validate_label_map(label_map) return label_map def get_label_map_dict(label_map_path, use_display_name=False, fill_in_gaps_and_background=False): """Reads a label map and returns a dictionary of label names to id. Args: label_map_path: path to StringIntLabelMap proto text file. use_display_name: whether to use the label map items' display names as keys. fill_in_gaps_and_background: whether to fill in gaps and background with respect to the id field in the proto. The id: 0 is reserved for the 'background' class and will be added if it is missing. All other missing ids in range(1, max(id)) will be added with a dummy class name ("class_<id>") if they are missing. Returns: A dictionary mapping label names to id. Raises: ValueError: if fill_in_gaps_and_background and label_map has non-integer or negative values. """ label_map = load_labelmap(label_map_path) label_map_dict = {} for item in label_map.item: if use_display_name: label_map_dict[item.display_name] = item.id else: label_map_dict[item.name] = item.id if fill_in_gaps_and_background: values = set(label_map_dict.values()) if 0 not in values: label_map_dict['background'] = 0 if not all(isinstance(value, int) for value in values): raise ValueError('The values in label map must be integers in order to' 'fill_in_gaps_and_background.') if not all(value >= 0 for value in values): raise ValueError('The values in the label map must be positive.') if len(values) != max(values) + 1: # there are gaps in the labels, fill in gaps. for value in range(1, max(values)): if value not in values: label_map_dict['class_' + str(value)] = value return label_map_dict def create_categories_from_labelmap(label_map_path, use_display_name=True): """Reads a label map and returns categories list compatible with eval. This function converts label map proto and returns a list of dicts, each of which has the following keys: 'id': an integer id uniquely identifying this category. 'name': string representing category name e.g., 'cat', 'dog'. Args: label_map_path: Path to `StringIntLabelMap` proto text file. use_display_name: (boolean) choose whether to load 'display_name' field as category name. If False or if the display_name field does not exist, uses 'name' field as category names instead. Returns: categories: a list of dictionaries representing all possible categories. 
""" label_map = load_labelmap(label_map_path) max_num_classes = max(item.id for item in label_map.item) return convert_label_map_to_categories(label_map, max_num_classes, use_display_name) def create_category_index_from_labelmap(label_map_path, use_display_name=True): """Reads a label map and returns a category index. Args: label_map_path: Path to `StringIntLabelMap` proto text file. use_display_name: (boolean) choose whether to load 'display_name' field as category name. If False or if the display_name field does not exist, uses 'name' field as category names instead. Returns: A category index, which is a dictionary that maps integer ids to dicts containing categories, e.g. {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...} """ categories = create_categories_from_labelmap(label_map_path, use_display_name) return create_category_index(categories) def create_class_agnostic_category_index(): """Creates a category index with a single `object` class.""" return {1: {'id': 1, 'name': 'object'}}
TensorFlow/Detection/SSD/models/research/object_detection/g3doc
g3doc
preparing_inputs
# Preparing Inputs

The Tensorflow Object Detection API reads data in the TFRecord file format. Two sample scripts (`create_pascal_tf_record.py` and `create_pet_tf_record.py`) are provided to convert the PASCAL VOC dataset and the Oxford-IIIT Pet dataset to TFRecords.

## Generating the PASCAL VOC TFRecord files.

The raw 2012 PASCAL VOC data set is located [here](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar). To download, extract and convert it to TFRecords, run the commands below:

```bash
# From tensorflow/models/research/
wget http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar
tar -xvf VOCtrainval_11-May-2012.tar
python object_detection/dataset_tools/create_pascal_tf_record.py \
    --label_map_path=object_detection/data/pascal_label_map.pbtxt \
    --data_dir=VOCdevkit --year=VOC2012 --set=train \
    --output_path=pascal_train.record
python object_detection/dataset_tools/create_pascal_tf_record.py \
    --label_map_path=object_detection/data/pascal_label_map.pbtxt \
    --data_dir=VOCdevkit --year=VOC2012 --set=val \
    --output_path=pascal_val.record
```

You should end up with two TFRecord files named `pascal_train.record` and `pascal_val.record` in the `tensorflow/models/research/` directory.

The label map for the PASCAL VOC data set can be found at `object_detection/data/pascal_label_map.pbtxt`.

## Generating the Oxford-IIIT Pet TFRecord files.

The Oxford-IIIT Pet data set is located [here](http://www.robots.ox.ac.uk/~vgg/data/pets/). To download, extract and convert it to TFRecords, run the commands below:

```bash
# From tensorflow/models/research/
wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz
wget http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz
tar -xvf annotations.tar.gz
tar -xvf images.tar.gz
python object_detection/dataset_tools/create_pet_tf_record.py \
    --label_map_path=object_detection/data/pet_label_map.pbtxt \
    --data_dir=`pwd` \
    --output_dir=`pwd`
```

You should end up with two 10-sharded TFRecord files named `pet_faces_train.record-?????-of-00010` and `pet_faces_val.record-?????-of-00010` in the `tensorflow/models/research/` directory.

The label map for the Pet dataset can be found at `object_detection/data/pet_label_map.pbtxt`.
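As a quick, unofficial sanity check (not part of the conversion scripts above), the record count of a generated file can be verified with the TF1 `tf.python_io` API used elsewhere in this codebase:

```python
# Count the examples written to pascal_train.record (illustrative check only).
import tensorflow as tf

num_records = sum(1 for _ in tf.python_io.tf_record_iterator('pascal_train.record'))
print('pascal_train.record contains {} examples'.format(num_records))
```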
TensorFlow2/LanguageModeling/BERT/official/utils/misc
misc
distribution_utils
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helper functions for running models in a distributed setting.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json import os import random import string import tensorflow as tf from official.utils.misc import tpu_lib def _collective_communication(all_reduce_alg): """Return a CollectiveCommunication based on all_reduce_alg. Args: all_reduce_alg: a string specifying which collective communication to pick, or None. Returns: tf.distribute.experimental.CollectiveCommunication object Raises: ValueError: if `all_reduce_alg` not in [None, 'ring', 'nccl'] """ collective_communication_options = { None: tf.distribute.experimental.CollectiveCommunication.AUTO, "ring": tf.distribute.experimental.CollectiveCommunication.RING, "nccl": tf.distribute.experimental.CollectiveCommunication.NCCL } if all_reduce_alg not in collective_communication_options: raise ValueError( "When used with `multi_worker_mirrored`, valid values for " "all_reduce_alg are ['ring', 'nccl']. Supplied value: {}".format( all_reduce_alg)) return collective_communication_options[all_reduce_alg] def _mirrored_cross_device_ops(all_reduce_alg, num_packs): """Return a CrossDeviceOps based on all_reduce_alg and num_packs. Args: all_reduce_alg: a string specifying which cross device op to pick, or None. num_packs: an integer specifying number of packs for the cross device op. Returns: tf.distribute.CrossDeviceOps object or None. Raises: ValueError: if `all_reduce_alg` not in [None, 'nccl', 'hierarchical_copy']. """ if all_reduce_alg is None: return None mirrored_all_reduce_options = { "nccl": tf.distribute.NcclAllReduce, "hierarchical_copy": tf.distribute.HierarchicalCopyAllReduce } if all_reduce_alg not in mirrored_all_reduce_options: raise ValueError( "When used with `mirrored`, valid values for all_reduce_alg are " "['nccl', 'hierarchical_copy']. Supplied value: {}".format( all_reduce_alg)) cross_device_ops_class = mirrored_all_reduce_options[all_reduce_alg] return cross_device_ops_class(num_packs=num_packs) def get_distribution_strategy(distribution_strategy="mirrored", num_gpus=0, num_workers=1, all_reduce_alg=None, num_packs=1, tpu_address=None): """Return a DistributionStrategy for running the model. Args: distribution_strategy: a string specifying which distribution strategy to use. Accepted values are 'off', 'one_device', 'mirrored', 'parameter_server', 'multi_worker_mirrored', and 'tpu' -- case insensitive. 'off' means not to use Distribution Strategy; 'tpu' means to use TPUStrategy using `tpu_address`. num_gpus: Number of GPUs to run this model. num_workers: Number of workers to run this model. all_reduce_alg: Optional. Specifies which algorithm to use when performing all-reduce. For `MirroredStrategy`, valid values are "nccl" and "hierarchical_copy". 
For `MultiWorkerMirroredStrategy`, valid values are "ring" and "nccl". If None, DistributionStrategy will choose based on device topology. num_packs: Optional. Sets the `num_packs` in `tf.distribute.NcclAllReduce` or `tf.distribute.HierarchicalCopyAllReduce` for `MirroredStrategy`. tpu_address: Optional. String that represents TPU to connect to. Must not be None if `distribution_strategy` is set to `tpu`. Returns: tf.distribute.DistibutionStrategy object. Raises: ValueError: if `distribution_strategy` is 'off' or 'one_device' and `num_gpus` is larger than 1; or `num_gpus` is negative or if `distribution_strategy` is `tpu` but `tpu_address` is not specified. """ if num_gpus < 0: raise ValueError("`num_gpus` can not be negative.") distribution_strategy = distribution_strategy.lower() if distribution_strategy == "off": return None if distribution_strategy == "tpu": # When tpu_address is an empty string, we communicate with local TPUs. cluster_resolver = tpu_lib.tpu_initialize(tpu_address) return tf.distribute.experimental.TPUStrategy(cluster_resolver) if distribution_strategy == "multi_worker_mirrored": return tf.distribute.experimental.MultiWorkerMirroredStrategy( communication=_collective_communication(all_reduce_alg)) if distribution_strategy == "one_device": if num_gpus == 0: return tf.distribute.OneDeviceStrategy("device:CPU:0") if num_gpus > 1: raise ValueError("`OneDeviceStrategy` can not be used for more than " "one device.") return tf.distribute.OneDeviceStrategy("device:GPU:0") if distribution_strategy == "mirrored": if num_gpus == 0: devices = ["device:CPU:0"] else: devices = ["device:GPU:%d" % i for i in range(num_gpus)] return tf.distribute.MirroredStrategy( devices=devices, cross_device_ops=_mirrored_cross_device_ops(all_reduce_alg, num_packs)) if distribution_strategy == "parameter_server": return tf.distribute.experimental.ParameterServerStrategy() raise ValueError( "Unrecognized Distribution Strategy: %r" % distribution_strategy) def per_replica_batch_size(batch_size, num_gpus): """For multi-gpu, batch-size must be a multiple of the number of GPUs. Note that distribution strategy handles this automatically when used with Keras. For using with Estimator, we need to get per GPU batch. Args: batch_size: Global batch size to be divided among devices. This should be equal to num_gpus times the single-GPU batch_size for multi-gpu training. num_gpus: How many GPUs are used with DistributionStrategies. Returns: Batch size per device. Raises: ValueError: if batch_size is not divisible by number of devices """ if num_gpus <= 1: return batch_size remainder = batch_size % num_gpus if remainder: err = ('When running with multiple GPUs, batch size ' 'must be a multiple of the number of available GPUs. Found {} ' 'GPUs with a batch size of {}; try --batch_size={} instead.' ).format(num_gpus, batch_size, batch_size - remainder) raise ValueError(err) return int(batch_size / num_gpus) # The `SyntheticDataset` is a temporary solution for generating synthetic data # directly on devices. It is only useful for Keras with Distribution # Strategies. We will have better support in `tf.data` or Distribution Strategy # later. class SyntheticDataset(object): """A dataset that generates synthetic data on each device.""" def __init__(self, dataset, split_by=1): # dataset.take(1) doesn't have GPU kernel. 
with tf.device('device:CPU:0'): tensor = tf.data.experimental.get_single_element(dataset.take(1)) flat_tensor = tf.nest.flatten(tensor) variable_data = [] initializers = [] for t in flat_tensor: rebatched_t = tf.split(t, num_or_size_splits=split_by, axis=0)[0] assert rebatched_t.shape.is_fully_defined(), rebatched_t.shape v = tf.compat.v1.get_local_variable(self._random_name(), initializer=rebatched_t) variable_data.append(v) initializers.append(v.initializer) input_data = tf.nest.pack_sequence_as(tensor, variable_data) self._iterator = SyntheticIterator(input_data, initializers) def _random_name(self, size=10, chars=string.ascii_uppercase + string.digits): return ''.join(random.choice(chars) for _ in range(size)) def __iter__(self): return self._iterator def make_one_shot_iterator(self): return self._iterator def make_initializable_iterator(self): return self._iterator class SyntheticIterator(object): """A dataset that generates synthetic data on each device.""" def __init__(self, input_data, initializers): self._input_data = input_data self._initializers = initializers def get_next(self): return self._input_data def next(self): return self.__next__() def __next__(self): try: return self.get_next() except tf.errors.OutOfRangeError: raise StopIteration def initialize(self): if tf.executing_eagerly(): return tf.no_op() else: return self._initializers def _monkey_patch_dataset_method(strategy): """Monkey-patch `strategy`'s `make_dataset_iterator` method.""" def make_dataset(self, dataset): tf.compat.v1.logging.info('Using pure synthetic data.') with self.scope(): if self.extended._global_batch_size: # pylint: disable=protected-access return SyntheticDataset(dataset, self.num_replicas_in_sync) else: return SyntheticDataset(dataset) def make_iterator(self, dataset): dist_dataset = make_dataset(self, dataset) return iter(dist_dataset) strategy.orig_make_dataset_iterator = strategy.make_dataset_iterator strategy.make_dataset_iterator = make_iterator strategy.orig_distribute_dataset = strategy.experimental_distribute_dataset strategy.experimental_distribute_dataset = make_dataset def _undo_monkey_patch_dataset_method(strategy): if hasattr(strategy, 'orig_make_dataset_iterator'): strategy.make_dataset_iterator = strategy.orig_make_dataset_iterator if hasattr(strategy, 'orig_distribute_dataset'): strategy.make_dataset_iterator = strategy.orig_distribute_dataset def set_up_synthetic_data(): _monkey_patch_dataset_method(tf.distribute.OneDeviceStrategy) _monkey_patch_dataset_method(tf.distribute.MirroredStrategy) _monkey_patch_dataset_method( tf.distribute.experimental.MultiWorkerMirroredStrategy) def undo_set_up_synthetic_data(): _undo_monkey_patch_dataset_method(tf.distribute.OneDeviceStrategy) _undo_monkey_patch_dataset_method(tf.distribute.MirroredStrategy) _undo_monkey_patch_dataset_method( tf.distribute.experimental.MultiWorkerMirroredStrategy) def configure_cluster(worker_hosts=None, task_index=-1): """Set multi-worker cluster spec in TF_CONFIG environment variable. Args: worker_hosts: comma-separated list of worker ip:port pairs. Returns: Number of workers in the cluster. 
""" tf_config = json.loads(os.environ.get('TF_CONFIG', '{}')) if tf_config: num_workers = (len(tf_config['cluster'].get('chief', [])) + len(tf_config['cluster'].get('worker', []))) elif worker_hosts: workers = worker_hosts.split(',') num_workers = len(workers) if num_workers > 1 and task_index < 0: raise ValueError('Must specify task_index when number of workers > 1') task_index = 0 if num_workers == 1 else task_index os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { 'worker': workers }, 'task': {'type': 'worker', 'index': task_index} }) else: num_workers = 1 return num_workers def get_strategy_scope(strategy): if strategy: strategy_scope = strategy.scope() else: strategy_scope = DummyContextManager() return strategy_scope class DummyContextManager(object): def __enter__(self): pass def __exit__(self, *args): pass
TensorFlow/Detection/SSD/models/research/object_detection/dataset_tools
dataset_tools
tf_record_creation_util_test
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf_record_creation_util.py."""

import os
import contextlib2
import tensorflow as tf

from object_detection.dataset_tools import tf_record_creation_util


class OpenOutputTfrecordsTests(tf.test.TestCase):

  def test_sharded_tfrecord_writes(self):
    with contextlib2.ExitStack() as tf_record_close_stack:
      output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
          tf_record_close_stack,
          os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), 10)
      for idx in range(10):
        output_tfrecords[idx].write('test_{}'.format(idx))

    for idx in range(10):
      tf_record_path = '{}-{:05d}-of-00010'.format(
          os.path.join(tf.test.get_temp_dir(), 'test.tfrec'), idx)
      records = list(tf.python_io.tf_record_iterator(tf_record_path))
      self.assertAllEqual(records, ['test_{}'.format(idx)])


if __name__ == '__main__':
  tf.test.main()
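Outside the test, a typical conversion loop distributes serialized examples across the shards by example index. The sketch below follows that pattern; the output path and the `examples` list are placeholders, and real code would write serialized `tf.Example` protos rather than raw byte strings.

```python
# Sketch of using open_sharded_output_tfrecords in a conversion loop; the
# output path and examples are placeholders.
import contextlib2

from object_detection.dataset_tools import tf_record_creation_util

num_shards = 10
examples = [b'example-0', b'example-1', b'example-2']

with contextlib2.ExitStack() as tf_record_close_stack:
    output_tfrecords = tf_record_creation_util.open_sharded_output_tfrecords(
        tf_record_close_stack, '/tmp/faces_train.record', num_shards)
    for index, example in enumerate(examples):
        # Round-robin assignment: example i goes to shard i % num_shards.
        output_tfrecords[index % num_shards].write(example)
```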
TensorFlow2/LanguageModeling/ELECTRA/vocab
vocab
vocab
[PAD] [unused0] [unused1] [unused2] [unused3] [unused4] [unused5] [unused6] [unused7] [unused8] [unused9] [unused10] [unused11] [unused12] [unused13] [unused14] [unused15] [unused16] [unused17] [unused18] [unused19] [unused20] [unused21] [unused22] [unused23] [unused24] [unused25] [unused26] [unused27] [unused28] [unused29] [unused30] [unused31] [unused32] [unused33] [unused34] [unused35] [unused36] [unused37] [unused38] [unused39] [unused40] [unused41] [unused42] [unused43] [unused44] [unused45] [unused46] [unused47] [unused48] [unused49] [unused50] [unused51] [unused52] [unused53] [unused54] [unused55] [unused56] [unused57] [unused58] [unused59] [unused60] [unused61] [unused62] [unused63] [unused64] [unused65] [unused66] [unused67] [unused68] [unused69] [unused70] [unused71] [unused72] [unused73] [unused74] [unused75] [unused76] [unused77] [unused78] [unused79] [unused80] [unused81] [unused82] [unused83] [unused84] [unused85] [unused86] [unused87] [unused88] [unused89] [unused90] [unused91] [unused92] [unused93] [unused94] [unused95] [unused96] [unused97] [unused98] [UNK] [CLS] [SEP] [MASK] [unused99] [unused100] [unused101] [unused102] [unused103] [unused104] [unused105] [unused106] [unused107] [unused108] [unused109] [unused110] [unused111] [unused112] [unused113] [unused114] [unused115] [unused116] [unused117] [unused118] [unused119] [unused120] [unused121] [unused122] [unused123] [unused124] [unused125] [unused126] [unused127] [unused128] [unused129] [unused130] [unused131] [unused132] [unused133] [unused134] [unused135] [unused136] [unused137] [unused138] [unused139] [unused140] [unused141] [unused142] [unused143] [unused144] [unused145] [unused146] [unused147] [unused148] [unused149] [unused150] [unused151] [unused152] [unused153] [unused154] [unused155] [unused156] [unused157] [unused158] [unused159] [unused160] [unused161] [unused162] [unused163] [unused164] [unused165] [unused166] [unused167] [unused168] [unused169] [unused170] [unused171] [unused172] [unused173] [unused174] [unused175] [unused176] [unused177] [unused178] [unused179] [unused180] [unused181] [unused182] [unused183] [unused184] [unused185] [unused186] [unused187] [unused188] [unused189] [unused190] [unused191] [unused192] [unused193] [unused194] [unused195] [unused196] [unused197] [unused198] [unused199] [unused200] [unused201] [unused202] [unused203] [unused204] [unused205] [unused206] [unused207] [unused208] [unused209] [unused210] [unused211] [unused212] [unused213] [unused214] [unused215] [unused216] [unused217] [unused218] [unused219] [unused220] [unused221] [unused222] [unused223] [unused224] [unused225] [unused226] [unused227] [unused228] [unused229] [unused230] [unused231] [unused232] [unused233] [unused234] [unused235] [unused236] [unused237] [unused238] [unused239] [unused240] [unused241] [unused242] [unused243] [unused244] [unused245] [unused246] [unused247] [unused248] [unused249] [unused250] [unused251] [unused252] [unused253] [unused254] [unused255] [unused256] [unused257] [unused258] [unused259] [unused260] [unused261] [unused262] [unused263] [unused264] [unused265] [unused266] [unused267] [unused268] [unused269] [unused270] [unused271] [unused272] [unused273] [unused274] [unused275] [unused276] [unused277] [unused278] [unused279] [unused280] [unused281] [unused282] [unused283] [unused284] [unused285] [unused286] [unused287] [unused288] [unused289] [unused290] [unused291] [unused292] [unused293] [unused294] [unused295] [unused296] [unused297] [unused298] [unused299] [unused300] [unused301] 
[unused302] [unused303] [unused304] [unused305] [unused306] [unused307] [unused308] [unused309] [unused310] [unused311] [unused312] [unused313] [unused314] [unused315] [unused316] [unused317] [unused318] [unused319] [unused320] [unused321] [unused322] [unused323] [unused324] [unused325] [unused326] [unused327] [unused328] [unused329] [unused330] [unused331] [unused332] [unused333] [unused334] [unused335] [unused336] [unused337] [unused338] [unused339] [unused340] [unused341] [unused342] [unused343] [unused344] [unused345] [unused346] [unused347] [unused348] [unused349] [unused350] [unused351] [unused352] [unused353] [unused354] [unused355] [unused356] [unused357] [unused358] [unused359] [unused360] [unused361] [unused362] [unused363] [unused364] [unused365] [unused366] [unused367] [unused368] [unused369] [unused370] [unused371] [unused372] [unused373] [unused374] [unused375] [unused376] [unused377] [unused378] [unused379] [unused380] [unused381] [unused382] [unused383] [unused384] [unused385] [unused386] [unused387] [unused388] [unused389] [unused390] [unused391] [unused392] [unused393] [unused394] [unused395] [unused396] [unused397] [unused398] [unused399] [unused400] [unused401] [unused402] [unused403] [unused404] [unused405] [unused406] [unused407] [unused408] [unused409] [unused410] [unused411] [unused412] [unused413] [unused414] [unused415] [unused416] [unused417] [unused418] [unused419] [unused420] [unused421] [unused422] [unused423] [unused424] [unused425] [unused426] [unused427] [unused428] [unused429] [unused430] [unused431] [unused432] [unused433] [unused434] [unused435] [unused436] [unused437] [unused438] [unused439] [unused440] [unused441] [unused442] [unused443] [unused444] [unused445] [unused446] [unused447] [unused448] [unused449] [unused450] [unused451] [unused452] [unused453] [unused454] [unused455] [unused456] [unused457] [unused458] [unused459] [unused460] [unused461] [unused462] [unused463] [unused464] [unused465] [unused466] [unused467] [unused468] [unused469] [unused470] [unused471] [unused472] [unused473] [unused474] [unused475] [unused476] [unused477] [unused478] [unused479] [unused480] [unused481] [unused482] [unused483] [unused484] [unused485] [unused486] [unused487] [unused488] [unused489] [unused490] [unused491] [unused492] [unused493] [unused494] [unused495] [unused496] [unused497] [unused498] [unused499] [unused500] [unused501] [unused502] [unused503] [unused504] [unused505] [unused506] [unused507] [unused508] [unused509] [unused510] [unused511] [unused512] [unused513] [unused514] [unused515] [unused516] [unused517] [unused518] [unused519] [unused520] [unused521] [unused522] [unused523] [unused524] [unused525] [unused526] [unused527] [unused528] [unused529] [unused530] [unused531] [unused532] [unused533] [unused534] [unused535] [unused536] [unused537] [unused538] [unused539] [unused540] [unused541] [unused542] [unused543] [unused544] [unused545] [unused546] [unused547] [unused548] [unused549] [unused550] [unused551] [unused552] [unused553] [unused554] [unused555] [unused556] [unused557] [unused558] [unused559] [unused560] [unused561] [unused562] [unused563] [unused564] [unused565] [unused566] [unused567] [unused568] [unused569] [unused570] [unused571] [unused572] [unused573] [unused574] [unused575] [unused576] [unused577] [unused578] [unused579] [unused580] [unused581] [unused582] [unused583] [unused584] [unused585] [unused586] [unused587] [unused588] [unused589] [unused590] [unused591] [unused592] [unused593] [unused594] [unused595] [unused596] [unused597] 
[unused598] [unused599] [unused600] [unused601] [unused602] [unused603] [unused604] [unused605] [unused606] [unused607] [unused608] [unused609] [unused610] [unused611] [unused612] [unused613] [unused614] [unused615] [unused616] [unused617] [unused618] [unused619] [unused620] [unused621] [unused622] [unused623] [unused624] [unused625] [unused626] [unused627] [unused628] [unused629] [unused630] [unused631] [unused632] [unused633] [unused634] [unused635] [unused636] [unused637] [unused638] [unused639] [unused640] [unused641] [unused642] [unused643] [unused644] [unused645] [unused646] [unused647] [unused648] [unused649] [unused650] [unused651] [unused652] [unused653] [unused654] [unused655] [unused656] [unused657] [unused658] [unused659] [unused660] [unused661] [unused662] [unused663] [unused664] [unused665] [unused666] [unused667] [unused668] [unused669] [unused670] [unused671] [unused672] [unused673] [unused674] [unused675] [unused676] [unused677] [unused678] [unused679] [unused680] [unused681] [unused682] [unused683] [unused684] [unused685] [unused686] [unused687] [unused688] [unused689] [unused690] [unused691] [unused692] [unused693] [unused694] [unused695] [unused696] [unused697] [unused698] [unused699] [unused700] [unused701] [unused702] [unused703] [unused704] [unused705] [unused706] [unused707] [unused708] [unused709] [unused710] [unused711] [unused712] [unused713] [unused714] [unused715] [unused716] [unused717] [unused718] [unused719] [unused720] [unused721] [unused722] [unused723] [unused724] [unused725] [unused726] [unused727] [unused728] [unused729] [unused730] [unused731] [unused732] [unused733] [unused734] [unused735] [unused736] [unused737] [unused738] [unused739] [unused740] [unused741] [unused742] [unused743] [unused744] [unused745] [unused746] [unused747] [unused748] [unused749] [unused750] [unused751] [unused752] [unused753] [unused754] [unused755] [unused756] [unused757] [unused758] [unused759] [unused760] [unused761] [unused762] [unused763] [unused764] [unused765] [unused766] [unused767] [unused768] [unused769] [unused770] [unused771] [unused772] [unused773] [unused774] [unused775] [unused776] [unused777] [unused778] [unused779] [unused780] [unused781] [unused782] [unused783] [unused784] [unused785] [unused786] [unused787] [unused788] [unused789] [unused790] [unused791] [unused792] [unused793] [unused794] [unused795] [unused796] [unused797] [unused798] [unused799] [unused800] [unused801] [unused802] [unused803] [unused804] [unused805] [unused806] [unused807] [unused808] [unused809] [unused810] [unused811] [unused812] [unused813] [unused814] [unused815] [unused816] [unused817] [unused818] [unused819] [unused820] [unused821] [unused822] [unused823] [unused824] [unused825] [unused826] [unused827] [unused828] [unused829] [unused830] [unused831] [unused832] [unused833] [unused834] [unused835] [unused836] [unused837] [unused838] [unused839] [unused840] [unused841] [unused842] [unused843] [unused844] [unused845] [unused846] [unused847] [unused848] [unused849] [unused850] [unused851] [unused852] [unused853] [unused854] [unused855] [unused856] [unused857] [unused858] [unused859] [unused860] [unused861] [unused862] [unused863] [unused864] [unused865] [unused866] [unused867] [unused868] [unused869] [unused870] [unused871] [unused872] [unused873] [unused874] [unused875] [unused876] [unused877] [unused878] [unused879] [unused880] [unused881] [unused882] [unused883] [unused884] [unused885] [unused886] [unused887] [unused888] [unused889] [unused890] [unused891] [unused892] [unused893] 
[unused894] [unused895] [unused896] [unused897] [unused898] [unused899] [unused900] [unused901] [unused902] [unused903] [unused904] [unused905] [unused906] [unused907] [unused908] [unused909] [unused910] [unused911] [unused912] [unused913] [unused914] [unused915] [unused916] [unused917] [unused918] [unused919] [unused920] [unused921] [unused922] [unused923] [unused924] [unused925] [unused926] [unused927] [unused928] [unused929] [unused930] [unused931] [unused932] [unused933] [unused934] [unused935] [unused936] [unused937] [unused938] [unused939] [unused940] [unused941] [unused942] [unused943] [unused944] [unused945] [unused946] [unused947] [unused948] [unused949] [unused950] [unused951] [unused952] [unused953] [unused954] [unused955] [unused956] [unused957] [unused958] [unused959] [unused960] [unused961] [unused962] [unused963] [unused964] [unused965] [unused966] [unused967] [unused968] [unused969] [unused970] [unused971] [unused972] [unused973] [unused974] [unused975] [unused976] [unused977] [unused978] [unused979] [unused980] [unused981] [unused982] [unused983] [unused984] [unused985] [unused986] [unused987] [unused988] [unused989] [unused990] [unused991] [unused992] [unused993] ! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ? @ [ \ ] ^ _ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~ ¡ ¢ £ ¤ ¥ ¦ § ¨ © ª « ¬ ® ° ± ² ³ ´ µ ¶ · ¹ º » ¼ ½ ¾ ¿ × ß æ ð ÷ ø þ đ ħ ı ł ŋ œ ƒ ɐ ɑ ɒ ɔ ɕ ə ɛ ɡ ɣ ɨ ɪ ɫ ɬ ɯ ɲ ɴ ɹ ɾ ʀ ʁ ʂ ʃ ʉ ʊ ʋ ʌ ʎ ʐ ʑ ʒ ʔ ʰ ʲ ʳ ʷ ʸ ʻ ʼ ʾ ʿ ˈ ː ˡ ˢ ˣ ˤ α β γ δ ε ζ η θ ι κ λ μ ν ξ ο π ρ ς σ τ υ φ χ ψ ω а б в г д е ж з и к л м н о п р с т у ф х ц ч ш щ ъ ы ь э ю я ђ є і ј љ њ ћ ӏ ա բ գ դ ե թ ի լ կ հ մ յ ն ո պ ս վ տ ր ւ ք ־ א ב ג ד ה ו ז ח ט י ך כ ל ם מ ן נ ס ע ף פ ץ צ ק ר ש ת ، ء ا ب ة ت ث ج ح خ د ذ ر ز س ش ص ض ط ظ ع غ ـ ف ق ك ل م ن ه و ى ي ٹ پ چ ک گ ں ھ ہ ی ے अ आ उ ए क ख ग च ज ट ड ण त थ द ध न प ब भ म य र ल व श ष स ह ा ि ी ो । ॥ ং অ আ ই উ এ ও ক খ গ চ ছ জ ট ড ণ ত থ দ ধ ন প ব ভ ম য র ল শ ষ স হ া ি ী ে க ச ட த ந ன ப ம ய ர ல ள வ ா ி ு ே ை ನ ರ ಾ ක ය ර ල ව ා ก ง ต ท น พ ม ย ร ล ว ส อ า เ ་ ། ག ང ད ན པ བ མ འ ར ལ ས မ ა ბ გ დ ე ვ თ ი კ ლ მ ნ ო რ ს ტ უ ᄀ ᄂ ᄃ ᄅ ᄆ ᄇ ᄉ ᄊ ᄋ ᄌ ᄎ ᄏ ᄐ ᄑ ᄒ ᅡ ᅢ ᅥ ᅦ ᅧ ᅩ ᅪ ᅭ ᅮ ᅯ ᅲ ᅳ ᅴ ᅵ ᆨ ᆫ ᆯ ᆷ ᆸ ᆼ ᴬ ᴮ ᴰ ᴵ ᴺ ᵀ ᵃ ᵇ ᵈ ᵉ ᵍ ᵏ ᵐ ᵒ ᵖ ᵗ ᵘ ᵢ ᵣ ᵤ ᵥ ᶜ ᶠ ‐ ‑ ‒ – — ― ‖ ‘ ’ ‚ “ ” „ † ‡ • … ‰ ′ ″ › ‿ ⁄ ⁰ ⁱ ⁴ ⁵ ⁶ ⁷ ⁸ ⁹ ⁺ ⁻ ⁿ ₀ ₁ ₂ ₃ ₄ ₅ ₆ ₇ ₈ ₉ ₊ ₍ ₎ ₐ ₑ ₒ ₓ ₕ ₖ ₗ ₘ ₙ ₚ ₛ ₜ ₤ ₩ € ₱ ₹ ℓ № ℝ ™ ⅓ ⅔ ← ↑ → ↓ ↔ ↦ ⇄ ⇌ ⇒ ∂ ∅ ∆ ∇ ∈ − ∗ ∘ √ ∞ ∧ ∨ ∩ ∪ ≈ ≡ ≤ ≥ ⊂ ⊆ ⊕ ⊗ ⋅ ─ │ ■ ▪ ● ★ ☆ ☉ ♠ ♣ ♥ ♦ ♭ ♯ ⟨ ⟩ ⱼ ⺩ ⺼ ⽥ 、 。 〈 〉 《 》 「 」 『 』 〜 あ い う え お か き く け こ さ し す せ そ た ち っ つ て と な に ぬ ね の は ひ ふ へ ほ ま み む め も や ゆ よ ら り る れ ろ を ん ァ ア ィ イ ウ ェ エ オ カ キ ク ケ コ サ シ ス セ タ チ ッ ツ テ ト ナ ニ ノ ハ ヒ フ ヘ ホ マ ミ ム メ モ ャ ュ ョ ラ リ ル レ ロ ワ ン ・ ー 一 三 上 下 不 世 中 主 久 之 也 事 二 五 井 京 人 亻 仁 介 代 仮 伊 会 佐 侍 保 信 健 元 光 八 公 内 出 分 前 劉 力 加 勝 北 区 十 千 南 博 原 口 古 史 司 合 吉 同 名 和 囗 四 国 國 土 地 坂 城 堂 場 士 夏 外 大 天 太 夫 奈 女 子 学 宀 宇 安 宗 定 宣 宮 家 宿 寺 將 小 尚 山 岡 島 崎 川 州 巿 帝 平 年 幸 广 弘 張 彳 後 御 德 心 忄 志 忠 愛 成 我 戦 戸 手 扌 政 文 新 方 日 明 星 春 昭 智 曲 書 月 有 朝 木 本 李 村 東 松 林 森 楊 樹 橋 歌 止 正 武 比 氏 民 水 氵 氷 永 江 沢 河 治 法 海 清 漢 瀬 火 版 犬 王 生 田 男 疒 発 白 的 皇 目 相 省 真 石 示 社 神 福 禾 秀 秋 空 立 章 竹 糹 美 義 耳 良 艹 花 英 華 葉 藤 行 街 西 見 訁 語 谷 貝 貴 車 軍 辶 道 郎 郡 部 都 里 野 金 鈴 镇 長 門 間 阝 阿 陳 陽 雄 青 面 風 食 香 馬 高 龍 龸 fi fl ! ( ) , - . / : ? 
~ the of and in to was he is as for on with that it his by at from her ##s she you had an were but be this are not my they one which or have him me first all also their has up who out been when after there into new two its ##a time would no what about said we over then other so more ##e can if like back them only some could ##i where just ##ing during before ##n do ##o made school through than now years most world may between down well three ##d year while will ##ed ##r ##y later ##t city under around did such being used state people part know against your many second university both national ##er these don known off way until re how even get head ... didn ##ly team american because de ##l born united film since still long work south us became any high again day family see right man eyes house season war states including took life north same each called name much place however go four group another found won area here going 10 away series left home music best make hand number company several never last john 000 very album take end good too following released game played little began district ##m old want those side held own early county ll league use west ##u face think ##es 2010 government ##h march came small general town june ##on line based something ##k september thought looked along international 2011 air july club went january october our august april york 12 few 2012 2008 east show member college 2009 father public ##us come men five set station church ##c next former november room party located december 2013 age got 2007 ##g system let love 2006 though every 2014 look song water century without body black night within great women single ve building large population river named band white started ##an once 15 20 should 18 2015 service top built british open death king moved local times children february book why 11 door need president order final road wasn although due major died village third knew 2016 asked turned st wanted say ##p together received main son served different ##en behind himself felt members power football law voice play ##in near park history 30 having 2005 16 ##man saw mother ##al army point front help english street art late hands games award ##ia young 14 put published country division across told 13 often ever french london center six red 2017 led days include light 25 find tell among species really according central half 2004 form original gave office making enough lost full opened must included live given german player run business woman community cup might million land 2000 court development 17 short round ii km seen class story always become sure research almost director council la ##2 career things using island ##z couldn car ##is 24 close force ##1 better free support control field students 2003 education married ##b nothing worked others record big inside level anything continued give james ##3 military established non returned feel does title written thing feet william far co association hard already 2002 ##ra championship human western 100 ##na department hall role various production 21 19 heart 2001 living fire version ##ers ##f television royal ##4 produced working act case society region present radio period looking least total keep england wife program per brother mind special 22 ##le am works soon ##6 political george services taken created ##7 further able reached david union joined upon done important social information either ##ic ##x appeared position ground lead rock dark election 23 board france hair course arms site police girl instead 
real sound ##v words moment ##te someone ##8 summer project announced san less wrote past followed ##5 blue founded al finally india taking records america ##ne 1999 design considered northern god stop battle toward european outside described track today playing language 28 call 26 heard professional low australia miles california win yet green ##ie trying blood ##ton southern science maybe everything match square 27 mouth video race recorded leave above ##9 daughter points space 1998 museum change middle common ##0 move tv post ##ta lake seven tried elected closed ten paul minister ##th months start chief return canada person sea release similar modern brought rest hit formed mr ##la 1997 floor event doing thomas 1996 robert care killed training star week needed turn finished railway rather news health sent example ran term michael coming currently yes forces despite gold areas 50 stage fact 29 dead says popular 2018 originally germany probably developed result pulled friend stood money running mi signed word songs child eventually met tour average teams minutes festival current deep kind 1995 decided usually eastern seemed ##ness episode bed added table indian private charles route available idea throughout centre addition appointed style 1994 books eight construction press mean wall friends remained schools study ##ch ##um institute oh chinese sometimes events possible 1992 australian type brown forward talk process food debut seat performance committee features character arts herself else lot strong russian range hours peter arm ##da morning dr sold ##ry quickly directed 1993 guitar china ##w 31 list ##ma performed media uk players smile ##rs myself 40 placed coach province towards wouldn leading whole boy official designed grand census ##el europe attack japanese henry 1991 ##re ##os cross getting alone action lower network wide washington japan 1990 hospital believe changed sister ##ar hold gone sir hadn ship ##ka studies academy shot rights below base bad involved kept largest ##ist bank future especially beginning mark movement section female magazine plan professor lord longer ##ian sat walked hill actually civil energy model families size thus aircraft completed includes data captain ##or fight vocals featured richard bridge fourth 1989 officer stone hear ##ism means medical groups management self lips competition entire lived technology leaving federal tournament bit passed hot independent awards kingdom mary spent fine doesn reported ##ling jack fall raised itself stay true studio 1988 sports replaced paris systems saint leader theatre whose market capital parents spanish canadian earth ##ity cut degree writing bay christian awarded natural higher bill ##as coast provided previous senior ft valley organization stopped onto countries parts conference queen security interest saying allowed master earlier phone matter smith winning try happened moving campaign los ##ley breath nearly mid 1987 certain girls date italian african standing fell artist ##ted shows deal mine industry 1986 ##ng everyone republic provide collection library student ##ville primary owned older via heavy 1st makes ##able attention anyone africa ##ri stated length ended fingers command staff skin foreign opening governor okay medal kill sun cover job 1985 introduced chest hell feeling ##ies success meet reason standard meeting novel 1984 trade source buildings ##land rose guy goal ##ur chapter native husband previously unit limited entered weeks producer operations mountain takes covered forced related roman 
complete successful key texas cold ##ya channel 1980 traditional films dance clear approximately 500 nine van prince question active tracks ireland regional silver author personal sense operation ##ine economic 1983 holding twenty isbn additional speed hour edition regular historic places whom shook movie km² secretary prior report chicago read foundation view engine scored 1982 units ask airport property ready immediately lady month listed contract ##de manager themselves lines ##ki navy writer meant ##ts runs ##ro practice championships singer glass commission required forest starting culture generally giving access attended test couple stand catholic martin caught executive ##less eye ##ey thinking chair quite shoulder 1979 hope decision plays defeated municipality whether structure offered slowly pain ice direction ##ion paper mission 1981 mostly 200 noted individual managed nature lives plant ##ha helped except studied computer figure relationship issue significant loss die smiled gun ago highest 1972 ##am male bring goals mexico problem distance commercial completely location annual famous drive 1976 neck 1978 surface caused italy understand greek highway wrong hotel comes appearance joseph double issues musical companies castle income review assembly bass initially parliament artists experience 1974 particular walk foot engineering talking window dropped ##ter miss baby boys break 1975 stars edge remember policy carried train stadium bar sex angeles evidence ##ge becoming assistant soviet 1977 upper step wing 1970 youth financial reach ##ll actor numerous ##se ##st nodded arrived ##ation minute ##nt believed sorry complex beautiful victory associated temple 1968 1973 chance perhaps metal ##son 1945 bishop ##et lee launched particularly tree le retired subject prize contains yeah theory empire ##ce suddenly waiting trust recording ##to happy terms camp champion 1971 religious pass zealand names 2nd port ancient tom corner represented watch legal anti justice cause watched brothers 45 material changes simply response louis fast ##ting answer 60 historical 1969 stories straight create feature increased rate administration virginia el activities cultural overall winner programs basketball legs guard beyond cast doctor mm flight results remains cost effect winter ##ble larger islands problems chairman grew commander isn 1967 pay failed selected hurt fort box regiment majority journal 35 edward plans ##ke ##ni shown pretty irish characters directly scene likely operated allow spring ##j junior matches looks mike houses fellow ##tion beach marriage ##ham ##ive rules oil 65 florida expected nearby congress sam peace recent iii wait subsequently cell ##do variety serving agreed please poor joe pacific attempt wood democratic piece prime ##ca rural mile touch appears township 1964 1966 soldiers ##men ##ized 1965 pennsylvania closer fighting claimed score jones physical editor ##ous filled genus specific sitting super mom ##va therefore supported status fear cases store meaning wales minor spain tower focus vice frank follow parish separate golden horse fifth remaining branch 32 presented stared ##id uses secret forms ##co baseball exactly ##ck choice note discovered travel composed truth russia ball color kiss dad wind continue ring referred numbers digital greater ##ns metres slightly direct increase 1960 responsible crew rule trees troops ##no broke goes individuals hundred weight creek sleep memory defense provides ordered code value jewish windows 1944 safe judge whatever corps realized 
macedonian ##heads hiking 1808 handing fischer ##itive garbage node ##pies prone singular papua inclined attractions italia pouring motioned grandma garnered jacksonville corp ego ringing aluminum ##hausen ordering ##foot drawer traders synagogue ##play ##kawa resistant wandering fragile fiona teased var hardcore soaked jubilee decisive exposition mercer poster valencia hale kuwait 1811 ##ises ##wr ##eed tavern gamma 122 johan ##uer airways amino gil ##ury vocational domains torres ##sp generator folklore outcomes ##keeper canberra shooter fl beams confrontation ##lling ##gram feb aligned forestry pipeline jax motorway conception decay ##tos coffin ##cott stalin 1805 escorted minded ##nam sitcom purchasing twilight veronica additions passive tensions straw 123 frequencies 1804 refugee cultivation ##iate christie clary bulletin crept disposal ##rich ##zong processor crescent ##rol bmw emphasized whale nazis aurora ##eng dwelling hauled sponsors toledo mega ideology theatres tessa cerambycidae saves turtle cone suspects kara rusty yelling greeks mozart shades cocked participant ##tro shire spit freeze necessity ##cos inmates nielsen councillors loaned uncommon omar peasants botanical offspring daniels formations jokes 1794 pioneers sigma licensing ##sus wheelchair polite 1807 liquor pratt trustee ##uta forewings balloon ##zz kilometre camping explicit casually shawn foolish teammates nm hassan carrie judged satisfy vanessa knives selective cnn flowed ##lice eclipse stressed eliza mathematician cease cultivated ##roy commissions browns ##ania destroyers sheridan meadow ##rius minerals ##cial downstream clash gram memoirs ventures baha seymour archie midlands edith fare flynn invite canceled tiles stabbed boulder incorporate amended camden facial mollusk unreleased descriptions yoga grabs 550 raises ramp shiver ##rose coined pioneering tunes qing warwick tops 119 melanie giles ##rous wandered ##inal annexed nov 30th unnamed ##ished organizational airplane normandy stoke whistle blessing violations chased holders shotgun ##ctic outlet reactor ##vik tires tearing shores fortified mascot constituencies nc columnist productive tibet ##rta lineage hooked oct tapes judging cody ##gger hansen kashmir triggered ##eva solved cliffs ##tree resisted anatomy protesters transparent implied ##iga injection mattress excluding ##mbo defenses helpless devotion ##elli growl liberals weber phenomena atoms plug ##iff mortality apprentice howe convincing aaa swimmer barber leone promptly sodium def nowadays arise ##oning gloucester corrected dignity norm erie ##ders elders evacuated sylvia compression ##yar hartford pose backpack reasoning accepts 24th wipe millimetres marcel ##oda dodgers albion 1790 overwhelmed aerospace oaks 1795 showcase acknowledge recovering nolan ashe hurts geology fashioned disappearance farewell swollen shrug marquis wimbledon 124 rue 1792 commemorate reduces experiencing inevitable calcutta intel ##court murderer sticking fisheries imagery bloom 280 brake ##inus gustav hesitation memorable po viral beans accidents tunisia antenna spilled consort treatments aye perimeter ##gard donation hostage migrated banker addiction apex lil trout ##ously conscience ##nova rams sands genome passionate troubles ##lets ##set amid ##ibility ##ret higgins exceed vikings ##vie payne ##zan muscular ##ste defendant sucking ##wal ibrahim fuselage claudia vfl europeans snails interval ##garh preparatory statewide tasked lacrosse viktor ##lation angola ##hra flint implications employs teens patrons stall 
weekends barriers scrambled nucleus tehran jenna parsons lifelong robots displacement 5000 ##bles precipitation ##gt knuckles clutched 1802 marrying ecology marx accusations declare scars kolkata mat meadows bermuda skeleton finalists vintage crawl coordinate affects subjected orchestral mistaken ##tc mirrors dipped relied 260 arches candle ##nick incorporating wildly fond basilica owl fringe rituals whispering stirred feud tertiary slick goat honorable whereby skip ricardo stripes parachute adjoining submerged synthesizer ##gren intend positively ninety phi beaver partition fellows alexis prohibition carlisle bizarre fraternity ##bre doubts icy cbc aquatic sneak sonny combines airports crude supervised spatial merge alfonso ##bic corrupt scan undergo ##ams disabilities colombian comparing dolphins perkins ##lish reprinted unanimous bounced hairs underworld midwest semester bucket paperback miniseries coventry demise ##leigh demonstrations sensor rotating yan ##hler arrange soils ##idge hyderabad labs ##dr brakes grandchildren ##nde negotiated rover ferrari continuation directorate augusta stevenson counterpart gore ##rda nursery rican ave collectively broadly pastoral repertoire asserted discovering nordic styled fiba cunningham harley middlesex survives tumor tempo zack aiming lok urgent ##rade ##nto devils ##ement contractor turin ##wl ##ool bliss repaired simmons moan astronomical cr negotiate lyric 1890s lara bred clad angus pbs ##ience engineered posed ##lk hernandez possessions elbows psychiatric strokes confluence electorate lifts campuses lava alps ##ep ##ution ##date physicist woody ##page ##ographic ##itis juliet reformation sparhawk 320 complement suppressed jewel ##½ floated ##kas continuity sadly ##ische inability melting scanning paula flour judaism safer vague ##lm solving curb ##stown financially gable bees expired miserable cassidy dominion 1789 cupped 145 robbery facto amos warden resume tallest marvin ing pounded usd declaring gasoline ##aux darkened 270 650 sophomore ##mere erection gossip televised risen dial ##eu pillars ##link passages profound ##tina arabian ashton silicon nail ##ead ##lated ##wer ##hardt fleming firearms ducked circuits blows waterloo titans ##lina atom fireplace cheshire financed activation algorithms ##zzi constituent catcher cherokee partnerships sexuality platoon tragic vivian guarded whiskey meditation poetic ##late ##nga ##ake porto listeners dominance kendra mona chandler factions 22nd salisbury attitudes derivative ##ido ##haus intake paced javier illustrator barrels bias cockpit burnett dreamed ensuing ##anda receptors someday hawkins mattered ##lal slavic 1799 jesuit cameroon wasted tai wax lowering victorious freaking outright hancock librarian sensing bald calcium myers tablet announcing barack shipyard pharmaceutical ##uan greenwich flush medley patches wolfgang pt speeches acquiring exams nikolai ##gg hayden kannada ##type reilly ##pt waitress abdomen devastated capped pseudonym pharmacy fulfill paraguay 1796 clicked ##trom archipelago syndicated ##hman lumber orgasm rejection clifford lorraine advent mafia rodney brock ##ght ##used ##elia cassette chamberlain despair mongolia sensors developmental upstream ##eg ##alis spanning 165 trombone basque seeded interred renewable rhys leapt revision molecule ##ages chord vicious nord shivered 23rd arlington debts corpus sunrise bays blackburn centimetres ##uded shuddered gm strangely gripping cartoons isabelle orbital ##ppa seals proving ##lton refusal strengthened bust assisting baghdad 
batsman portrayal mara pushes spears og ##cock reside nathaniel brennan 1776 confirmation caucus ##worthy markings yemen nobles ku lazy viewer catalan encompasses sawyer ##fall sparked substances patents braves arranger evacuation sergio persuade dover tolerance penguin cum jockey insufficient townships occupying declining plural processed projection puppet flanders introduces liability ##yon gymnastics antwerp taipei hobart candles jeep wes observers 126 chaplain bundle glorious ##hine hazel flung sol excavations dumped stares sh bangalore triangular icelandic intervals expressing turbine ##vers songwriting crafts ##igo jasmine ditch rite ##ways entertaining comply sorrow wrestlers basel emirates marian rivera helpful ##some caution downward networking ##atory ##tered darted genocide emergence replies specializing spokesman convenient unlocked fading augustine concentrations resemblance elijah investigator andhra ##uda promotes bean ##rrell fleeing wan simone announcer ##ame ##bby lydia weaver 132 residency modification ##fest stretches ##ast alternatively nat lowe lacks ##ented pam tile concealed inferior abdullah residences tissues vengeance ##ided moisture peculiar groove zip bologna jennings ninja oversaw zombies pumping batch livingston emerald installations 1797 peel nitrogen rama ##fying ##star schooling strands responding werner ##ost lime casa accurately targeting ##rod underway ##uru hemisphere lester ##yard occupies 2d griffith angrily reorganized ##owing courtney deposited ##dd ##30 estadio ##ifies dunn exiled ##ying checks ##combe ##о ##fly successes unexpectedly blu assessed ##flower ##ه observing sacked spiders kn ##tail mu nodes prosperity audrey divisional 155 broncos tangled adjust feeds erosion paolo surf directory snatched humid admiralty screwed gt reddish ##nese modules trench lamps bind leah bucks competes ##nz ##form transcription ##uc isles violently clutching pga cyclist inflation flats ragged unnecessary ##hian stubborn coordinated harriet baba disqualified 330 insect wolfe ##fies reinforcements rocked duel winked embraced bricks ##raj hiatus defeats pending brightly jealousy ##xton ##hm ##uki lena gdp colorful ##dley stein kidney ##shu underwear wanderers ##haw ##icus guardians m³ roared habits ##wise permits gp uranium punished disguise bundesliga elise dundee erotic partisan pi collectors float individually rendering behavioral bucharest ser hare valerie corporal nutrition proportional ##isa immense ##kis pavement ##zie ##eld sutherland crouched 1775 ##lp suzuki trades endurance operas crosby prayed priory rory socially ##urn gujarat ##pu walton cube pasha privilege lennon floods thorne waterfall nipple scouting approve ##lov minorities voter dwight extensions assure ballroom slap dripping privileges rejoined confessed demonstrating patriotic yell investor ##uth pagan slumped squares ##cle ##kins confront bert embarrassment ##aid aston urging sweater starr yuri brains williamson commuter mortar structured selfish exports ##jon cds ##him unfinished ##rre mortgage destinations ##nagar canoe solitary buchanan delays magistrate fk ##pling motivation ##lier ##vier recruiting assess ##mouth malik antique 1791 pius rahman reich tub zhou smashed airs galway xii conditioning honduras discharged dexter ##pf lionel 129 debates lemon tiffany volunteered dom dioxide procession devi sic tremendous advertisements colts transferring verdict hanover decommissioned utter relate pac racism ##top beacon limp similarity terra occurrence ant ##how becky capt updates armament 
richie pal ##graph halloween mayo ##ssen ##bone cara serena fcc dolls obligations ##dling violated lafayette jakarta exploitation ##ime infamous iconic ##lah ##park kitty moody reginald dread spill crystals olivier modeled bluff equilibrium separating notices ordnance extinction onset cosmic attachment sammy expose privy anchored ##bil abbott admits bending baritone emmanuel policeman vaughan winged climax dresses denny polytechnic mohamed burmese authentic nikki genetics grandparents homestead gaza postponed metacritic una ##sby ##bat unstable dissertation ##rial ##cian curls obscure uncovered bronx praying disappearing ##hoe prehistoric coke turret mutations nonprofit pits monaco ##ي ##usion prominently dispatched podium ##mir uci ##uation 133 fortifications birthplace kendall ##lby ##oll preacher rack goodman ##rman persistent ##ott countless jaime recorder lexington persecution jumps renewal wagons ##11 crushing ##holder decorations ##lake abundance wrath laundry £1 garde ##rp jeanne beetles peasant ##sl splitting caste sergei ##rer ##ema scripts ##ively rub satellites ##vor inscribed verlag scrapped gale packages chick potato slogan kathleen arabs ##culture counterparts reminiscent choral ##tead rand retains bushes dane accomplish courtesy closes ##oth slaughter hague krakow lawson tailed elias ginger ##ttes canopy betrayal rebuilding turf ##hof frowning allegiance brigades kicks rebuild polls alias nationalism td rowan audition bowie fortunately recognizes harp dillon horrified ##oro renault ##tics ropes ##α presumed rewarded infrared wiping accelerated illustration ##rid presses practitioners badminton ##iard detained ##tera recognizing relates misery ##sies ##tly reproduction piercing potatoes thornton esther manners hbo ##aan ours bullshit ernie perennial sensitivity illuminated rupert ##jin ##iss ##ear rfc nassau ##dock staggered socialism ##haven appointments nonsense prestige sharma haul ##tical solidarity gps ##ook ##rata igor pedestrian ##uit baxter tenants wires medication unlimited guiding impacts diabetes ##rama sasha pas clive extraction 131 continually constraints ##bilities sonata hunted sixteenth chu planting quote mayer pretended abs spat ##hua ceramic ##cci curtains pigs pitching ##dad latvian sore dayton ##sted ##qi patrols slice playground ##nted shone stool apparatus inadequate mates treason ##ija desires ##liga ##croft somalia laurent mir leonardo oracle grape obliged chevrolet thirteenth stunning enthusiastic ##ede accounted concludes currents basil ##kovic drought ##rica mai ##aire shove posting ##shed pilgrimage humorous packing fry pencil wines smells 144 marilyn aching newest clung bon neighbours sanctioned ##pie mug ##stock drowning ##mma hydraulic ##vil hiring reminder lilly investigators ##ncies sour ##eous compulsory packet ##rion ##graphic ##elle cannes ##inate depressed ##rit heroic importantly theresa ##tled conway saturn marginal rae ##xia corresponds royce pact jasper explosives packaging aluminium ##ttered denotes rhythmic spans assignments hereditary outlined originating sundays lad reissued greeting beatrice ##dic pillar marcos plots handbook alcoholic judiciary avant slides extract masculine blur ##eum ##force homage trembled owens hymn trey omega signaling socks accumulated reacted attic theo lining angie distraction primera talbot ##key 1200 ti creativity billed ##hey deacon eduardo identifies proposition dizzy gunner hogan ##yam ##pping ##hol ja ##chan jensen reconstructed ##berger clearance darius ##nier abe harlem plea dei circled 
emotionally notation fascist neville exceeded upwards viable ducks ##fo workforce racer limiting shri ##lson possesses 1600 kerr moths devastating laden disturbing locking ##cture gal fearing accreditation flavor aide 1870s mountainous ##baum melt ##ures motel texture servers soda ##mb herd ##nium erect puzzled hum peggy examinations gould testified geoff ren devised sacks ##law denial posters grunted cesar tutor ec gerry offerings byrne falcons combinations ct incoming pardon rocking 26th avengers flared mankind seller uttar loch nadia stroking exposing ##hd fertile ancestral instituted ##has noises prophecy taxation eminent vivid pol ##bol dart indirect multimedia notebook upside displaying adrenaline referenced geometric ##iving progression ##ddy blunt announce ##far implementing ##lav aggression liaison cooler cares headache plantations gorge dots impulse thickness ashamed averaging kathy obligation precursor 137 fowler symmetry thee 225 hears ##rai undergoing ads butcher bowler ##lip cigarettes subscription goodness ##ically browne ##hos ##tech kyoto donor ##erty damaging friction drifting expeditions hardened prostitution 152 fauna blankets claw tossing snarled butterflies recruits investigative coated healed 138 communal hai xiii academics boone psychologist restless lahore stephens mba brendan foreigners printer ##pc ached explode 27th deed scratched dared ##pole cardiac 1780 okinawa proto commando compelled oddly electrons ##base replica thanksgiving ##rist sheila deliberate stafford tidal representations hercules ou ##path ##iated kidnapping lenses ##tling deficit samoa mouths consuming computational maze granting smirk razor fixture ideals inviting aiden nominal ##vs issuing julio pitt ramsey docks ##oss exhaust ##owed bavarian draped anterior mating ethiopian explores noticing ##nton discarded convenience hoffman endowment beasts cartridge mormon paternal probe sleeves interfere lump deadline ##rail jenks bulldogs scrap alternating justified reproductive nam seize descending secretariat kirby coupe grouped smash panther sedan tapping ##18 lola cheer germanic unfortunate ##eter unrelated ##fan subordinate ##sdale suzanne advertisement ##ility horsepower ##lda cautiously discourse luigi ##mans ##fields noun prevalent mao schneider everett surround governorate kira ##avia westward ##take misty rails sustainability 134 unused ##rating packs toast unwilling regulate thy suffrage nile awe assam definitions travelers affordable ##rb conferred sells undefeated beneficial torso basal repeating remixes ##pass bahrain cables fang ##itated excavated numbering statutory ##rey deluxe ##lian forested ramirez derbyshire zeus slamming transfers astronomer banana lottery berg histories bamboo ##uchi resurrection posterior bowls vaguely ##thi thou preserving tensed offence ##inas meyrick callum ridden watt langdon tying lowland snorted daring truman ##hale ##girl aura overly filing weighing goa infections philanthropist saunders eponymous ##owski latitude perspectives reviewing mets commandant radial ##kha flashlight reliability koch vowels amazed ada elaine supper ##rth ##encies predator debated soviets cola ##boards ##nah compartment crooked arbitrary fourteenth ##ctive havana majors steelers clips profitable ambush exited packers ##tile nude cracks fungi ##е limb trousers josie shelby tens frederic ##ος definite smoothly constellation insult baton discs lingering ##nco conclusions lent staging becker grandpa shaky ##tron einstein obstacles sk adverse elle economically ##moto mccartney thor 
dismissal motions readings nostrils treatise ##pace squeezing evidently prolonged 1783 venezuelan je marguerite beirut takeover shareholders ##vent denise digit airplay norse ##bbling imaginary pills hubert blaze vacated eliminating ##ello vine mansfield ##tty retrospective barrow borne clutch bail forensic weaving ##nett ##witz desktop citadel promotions worrying dorset ieee subdivided ##iating manned expeditionary pickup synod chuckle 185 barney ##rz ##ffin functionality karachi litigation meanings uc lick turbo anders ##ffed execute curl oppose ankles typhoon ##د ##ache ##asia linguistics compassion pressures grazing perfection ##iting immunity monopoly muddy backgrounds 136 namibia francesca monitors attracting stunt tuition ##ии vegetable ##mates ##quent mgm jen complexes forts ##ond cellar bites seventeenth royals flemish failures mast charities ##cular peruvian capitals macmillan ipswich outward frigate postgraduate folds employing ##ouse concurrently fiery ##tai contingent nightmares monumental nicaragua ##kowski lizard mal fielding gig reject ##pad harding ##ipe coastline ##cin ##nos beethoven humphrey innovations ##tam ##nge norris doris solicitor huang obey 141 ##lc niagara ##tton shelves aug bourbon curry nightclub specifications hilton ##ndo centennial dispersed worm neglected briggs sm font kuala uneasy plc ##nstein ##bound ##aking ##burgh awaiting pronunciation ##bbed ##quest eh optimal zhu raped greens presided brenda worries ##life venetian marxist turnout ##lius refined braced sins grasped sunderland nickel speculated lowell cyrillic communism fundraising resembling colonists mutant freddie usc ##mos gratitude ##run mural ##lous chemist wi reminds 28th steals tess pietro ##ingen promoter ri microphone honoured rai sant ##qui feather ##nson burlington kurdish terrorists deborah sickness ##wed ##eet hazard irritated desperation veil clarity ##rik jewels xv ##gged ##ows ##cup berkshire unfair mysteries orchid winced exhaustion renovations stranded obe infinity ##nies adapt redevelopment thanked registry olga domingo noir tudor ole ##atus commenting behaviors ##ais crisp pauline probable stirling wigan ##bian paralympics panting surpassed ##rew luca barred pony famed ##sters cassandra waiter carolyn exported ##orted andres destructive deeds jonah castles vacancy suv ##glass 1788 orchard yep famine belarusian sprang ##forth skinny ##mis administrators rotterdam zambia zhao boiler discoveries ##ride ##physics lucius disappointing outreach spoon ##frame qualifications unanimously enjoys regency ##iidae stade realism veterinary rodgers dump alain chestnut castile censorship rumble gibbs ##itor communion reggae inactivated logs loads ##houses homosexual ##iano ale informs ##cas phrases plaster linebacker ambrose kaiser fascinated 850 limerick recruitment forge mastered ##nding leinster rooted threaten ##strom borneo ##hes suggestions scholarships propeller documentaries patronage coats constructing invest neurons comet entirety shouts identities annoying unchanged wary ##antly ##ogy neat oversight ##kos phillies replay constance ##kka incarnation humble skies minus ##acy smithsonian ##chel guerrilla jar cadets ##plate surplus audit ##aru cracking joanna louisa pacing ##lights intentionally ##iri diner nwa imprint australians tong unprecedented bunker naive specialists ark nichols railing leaked pedal ##uka shrub longing roofs v8 captains neural tuned ##ntal ##jet emission medina frantic codex definitive sid abolition intensified stocks enrique sustain genoa oxide ##written clues 
cha ##gers tributaries fragment venom ##rity ##ente ##sca muffled vain sire laos ##ingly ##hana hastily snapping surfaced sentiment motive ##oft contests approximate mesa luckily dinosaur exchanges propelled accord bourne relieve tow masks offended ##ues cynthia ##mmer rains bartender zinc reviewers lois ##sai legged arrogant rafe rosie comprise handicap blockade inlet lagoon copied drilling shelley petals ##inian mandarin obsolete ##inated onward arguably productivity cindy praising seldom busch discusses raleigh shortage ranged stanton encouragement firstly conceded overs temporal ##uke cbe ##bos woo certainty pumps ##pton stalked ##uli lizzie periodic thieves weaker ##night gases shoving chooses wc ##chemical prompting weights ##kill robust flanked sticky hu tuberculosis ##eb ##eal christchurch resembled wallet reese inappropriate pictured distract fixing fiddle giggled burger heirs hairy mechanic torque apache obsessed chiefly cheng logging ##tag extracted meaningful numb ##vsky gloucestershire reminding ##bay unite ##lit breeds diminished clown glove 1860s ##ن ##ug archibald focal freelance sliced depiction ##yk organism switches sights stray crawling ##ril lever leningrad interpretations loops anytime reel alicia delighted ##ech inhaled xiv suitcase bernie vega licenses northampton exclusion induction monasteries racecourse homosexuality ##right ##sfield ##rky dimitri michele alternatives ions commentators genuinely objected pork hospitality fencing stephan warships peripheral wit drunken wrinkled quentin spends departing chung numerical spokesperson ##zone johannesburg caliber killers ##udge assumes neatly demographic abigail bloc ##vel mounting ##lain bentley slightest xu recipients ##jk merlin ##writer seniors prisons blinking hindwings flickered kappa ##hel 80s strengthening appealing brewing gypsy mali lashes hulk unpleasant harassment bio treaties predict instrumentation pulp troupe boiling mantle ##ffe ins ##vn dividing handles verbs ##onal coconut senegal 340 thorough gum momentarily ##sto cocaine panicked destined ##turing teatro denying weary captained mans ##hawks ##code wakefield bollywood thankfully ##16 cyril ##wu amendments ##bahn consultation stud reflections kindness 1787 internally ##ovo tex mosaic distribute paddy seeming 143 ##hic piers ##15 ##mura ##verse popularly winger kang sentinel mccoy ##anza covenant ##bag verge fireworks suppress thrilled dominate ##jar swansea ##60 142 reconciliation ##ndi stiffened cue dorian ##uf damascus amor ida foremost ##aga porsche unseen dir ##had ##azi stony lexi melodies ##nko angular integer podcast ants inherent jaws justify persona ##olved josephine ##nr ##ressed customary flashes gala cyrus glaring backyard ariel physiology greenland html stir avon atletico finch methodology ked ##lent mas catholicism townsend branding quincy fits containers 1777 ashore aragon ##19 forearm poisoning ##sd adopting conquer grinding amnesty keller finances evaluate forged lankan instincts ##uto guam bosnian photographed workplace desirable protector ##dog allocation intently encourages willy ##sten bodyguard electro brighter ##ν bihar ##chev lasts opener amphibious sal verde arte ##cope captivity vocabulary yields ##tted agreeing desmond pioneered ##chus strap campaigned railroads ##ович emblem ##dre stormed 501 ##ulous marijuana northumberland ##gn ##nath bowen landmarks beaumont ##qua danube ##bler attorneys th ge flyers critique villains cass mutation acc ##0s colombo mckay motif sampling concluding syndicate ##rell neon stables ds warnings 
clint mourning wilkinson ##tated merrill leopard evenings exhaled emil sonia ezra discrete stove farrell fifteenth prescribed superhero ##rier worms helm wren ##duction ##hc expo ##rator hq unfamiliar antony prevents acceleration fiercely mari painfully calculations cheaper ign clifton irvine davenport mozambique ##np pierced ##evich wonders ##wig ##cate ##iling crusade ware ##uel enzymes reasonably mls ##coe mater ambition bunny eliot kernel ##fin asphalt headmaster torah aden lush pins waived ##care ##yas joao substrate enforce ##grad ##ules alvarez selections epidemic tempted ##bit bremen translates ensured waterfront 29th forrest manny malone kramer reigning cookies simpler absorption 205 engraved ##ffy evaluated 1778 haze 146 comforting crossover ##abe thorn ##rift ##imo ##pop suppression fatigue cutter ##tr 201 wurttemberg ##orf enforced hovering proprietary gb samurai syllable ascent lacey tick lars tractor merchandise rep bouncing defendants ##yre huntington ##ground ##oko standardized ##hor ##hima assassinated nu predecessors rainy liar assurance lyrical ##uga secondly flattened ios parameter undercover ##mity bordeaux punish ridges markers exodus inactive hesitate debbie nyc pledge savoy nagar offset organist ##tium hesse marin converting ##iver diagram propulsion pu validity reverted supportive ##dc ministries clans responds proclamation ##inae ##ø ##rea ein pleading patriot sf birch islanders strauss hates ##dh brandenburg concession rd ##ob 1900s killings textbook antiquity cinematography wharf embarrassing setup creed farmland inequality centred signatures fallon 370 ##ingham ##uts ceylon gazing directive laurie ##tern globally ##uated ##dent allah excavation threads ##cross 148 frantically icc utilize determines respiratory thoughtful receptions ##dicate merging chandra seine 147 builders builds diagnostic dev visibility goddamn analyses dhaka cho proves chancel concurrent curiously canadians pumped restoring 1850s turtles jaguar sinister spinal traction declan vows 1784 glowed capitalism swirling install universidad ##lder ##oat soloist ##genic ##oor coincidence beginnings nissan dip resorts caucasus combustion infectious ##eno pigeon serpent ##itating conclude masked salad jew ##gr surreal toni ##wc harmonica 151 ##gins ##etic ##coat fishermen intending bravery ##wave klaus titan wembley taiwanese ransom 40th incorrect hussein eyelids jp cooke dramas utilities ##etta ##print eisenhower principally granada lana ##rak openings concord ##bl bethany connie morality sega ##mons ##nard earnings ##kara ##cine wii communes ##rel coma composing softened severed grapes ##17 nguyen analyzed warlord hubbard heavenly behave slovenian ##hit ##ony hailed filmmakers trance caldwell skye unrest coward likelihood ##aging bern sci taliban honolulu propose ##wang 1700 browser imagining cobra contributes dukes instinctively conan violinist ##ores accessories gradual ##amp quotes sioux ##dating undertake intercepted sparkling compressed 139 fungus tombs haley imposing rests degradation lincolnshire retailers wetlands tulsa distributor dungeon nun greenhouse convey atlantis aft exits oman dresser lyons ##sti joking eddy judgement omitted digits ##cts ##game juniors ##rae cents stricken une ##ngo wizards weir breton nan technician fibers liking royalty ##cca 154 persia terribly magician ##rable ##unt vance cafeteria booker camille warmer ##static consume cavern gaps compass contemporaries foyer soothing graveyard maj plunged blush ##wear cascade demonstrates ordinance ##nov boyle ##lana 
rockefeller shaken banjo izzy ##ense breathless vines ##32 ##eman alterations chromosome dwellings feudal mole 153 catalonia relics tenant mandated ##fm fridge hats honesty patented raul heap cruisers accusing enlightenment infants wherein chatham contractors zen affinity hc osborne piston 156 traps maturity ##rana lagos ##zal peering ##nay attendant dealers protocols subset prospects biographical ##cre artery ##zers insignia nuns endured ##eration recommend schwartz serbs berger cromwell crossroads ##ctor enduring clasped grounded ##bine marseille twitched abel choke https catalyst moldova italians ##tist disastrous wee ##oured ##nti wwf nope ##piration ##asa expresses thumbs 167 ##nza coca 1781 cheating ##ption skipped sensory heidelberg spies satan dangers semifinal 202 bohemia whitish confusing shipbuilding relies surgeons landings ravi baku moor suffix alejandro ##yana litre upheld ##unk rajasthan ##rek coaster insists posture scenarios etienne favoured appoint transgender elephants poked greenwood defences fulfilled militant somali 1758 chalk potent ##ucci migrants wink assistants nos restriction activism niger ##ario colon shaun ##sat daphne ##erated swam congregations reprise considerations magnet playable xvi ##р overthrow tobias knob chavez coding ##mers propped katrina orient newcomer ##suke temperate ##pool farmhouse interrogation ##vd committing ##vert forthcoming strawberry joaquin macau ponds shocking siberia ##cellular chant contributors ##nant ##ologists sped absorb hail 1782 spared ##hore barbados karate opus originates saul ##xie evergreen leaped ##rock correlation exaggerated weekday unification bump tracing brig afb pathways utilizing ##ners mod mb disturbance kneeling ##stad ##guchi 100th pune ##thy decreasing 168 manipulation miriam academia ecosystem occupational rbi ##lem rift ##14 rotary stacked incorporation awakening generators guerrero racist ##omy cyber derivatives culminated allie annals panzer sainte wikipedia pops zu austro ##vate algerian politely nicholson mornings educate tastes thrill dartmouth ##gating db ##jee regan differing concentrating choreography divinity ##media pledged alexandre routing gregor madeline ##idal apocalypse ##hora gunfire culminating elves fined liang lam programmed tar guessing transparency gabrielle ##gna cancellation flexibility ##lining accession shea stronghold nets specializes ##rgan abused hasan sgt ling exceeding ##₄ admiration supermarket ##ark photographers specialised tilt resonance hmm perfume 380 sami threatens garland botany guarding boiled greet puppy russo supplier wilmington vibrant vijay ##bius paralympic grumbled paige faa licking margins hurricanes ##gong fest grenade ripping ##uz counseling weigh ##sian needles wiltshire edison costly ##not fulton tramway redesigned staffordshire cache gasping watkins sleepy candidacy ##group monkeys timeline throbbing ##bid ##sos berth uzbekistan vanderbilt bothering overturned ballots gem ##iger sunglasses subscribers hooker compelling ang exceptionally saloon stab ##rdi carla terrifying rom ##vision coil ##oids satisfying vendors 31st mackay deities overlooked ambient bahamas felipe olympia whirled botanist advertised tugging ##dden disciples morales unionist rites foley morse motives creepy ##₀ soo ##sz bargain highness frightening turnpike tory reorganization ##cer depict biographer ##walk unopposed manifesto ##gles institut emile accidental kapoor ##dam kilkenny cortex lively ##13 romanesque jain shan cannons ##ood ##ske petrol echoing amalgamated disappears cautious 
proposes sanctions trenton ##ر flotilla aus contempt tor canary cote theirs ##hun conceptual deleted fascinating paso blazing elf honourable hutchinson ##eiro ##outh ##zin surveyor tee amidst wooded reissue intro ##ono cobb shelters newsletter hanson brace encoding confiscated dem caravan marino scroll melodic cows imam ##adi ##aneous northward searches biodiversity cora 310 roaring ##bers connell theologian halo compose pathetic unmarried dynamo ##oot az calculation toulouse deserves humour nr forgiveness tam undergone martyr pamela myths whore counselor hicks 290 heavens battleship electromagnetic ##bbs stellar establishments presley hopped ##chin temptation 90s wills nas ##yuan nhs ##nya seminars ##yev adaptations gong asher lex indicator sikh tobago cites goin ##yte satirical ##gies characterised correspond bubbles lure participates ##vid eruption skate therapeutic 1785 canals wholesale defaulted sac 460 petit ##zzled virgil leak ravens 256 portraying ##yx ghetto creators dams portray vicente ##rington fae namesake bounty ##arium joachim ##ota ##iser aforementioned axle snout depended dismantled reuben 480 ##ibly gallagher ##lau ##pd earnest ##ieu ##iary inflicted objections ##llar asa gritted ##athy jericho ##sea ##was flick underside ceramics undead substituted 195 eastward undoubtedly wheeled chimney ##iche guinness cb ##ager siding ##bell traitor baptiste disguised inauguration 149 tipperary choreographer perched warmed stationary eco ##ike ##ntes bacterial ##aurus flores phosphate ##core attacker invaders alvin intersects a1 indirectly immigrated businessmen cornelius valves narrated pill sober ul nationale monastic applicants scenery ##jack 161 motifs constitutes cpu ##osh jurisdictions sd tuning irritation woven ##uddin fertility gao ##erie antagonist impatient glacial hides boarded denominations interception ##jas cookie nicola ##tee algebraic marquess bahn parole buyers bait turbines paperwork bestowed natasha renee oceans purchases 157 vaccine 215 ##tock fixtures playhouse integrate jai oswald intellectuals ##cky booked nests mortimer ##isi obsession sept ##gler ##sum 440 scrutiny simultaneous squinted ##shin collects oven shankar penned remarkably ##я slips luggage spectral 1786 collaborations louie consolidation ##ailed ##ivating 420 hoover blackpool harness ignition vest tails belmont mongol skinner ##nae visually mage derry ##tism ##unce stevie transitional ##rdy redskins drying prep prospective ##21 annoyance oversee ##loaded fills ##books ##iki announces fda scowled respects prasad mystic tucson ##vale revue springer bankrupt 1772 aristotle salvatore habsburg ##geny dal natal nut pod chewing darts moroccan walkover rosario lenin punjabi ##ße grossed scattering wired invasive hui polynomial corridors wakes gina portrays ##cratic arid retreating erich irwin sniper ##dha linen lindsey maneuver butch shutting socio bounce commemorative postseason jeremiah pines 275 mystical beads bp abbas furnace bidding consulted assaulted empirical rubble enclosure sob weakly cancel polly yielded ##emann curly prediction battered 70s vhs jacqueline render sails barked detailing grayson riga sloane raging ##yah herbs bravo ##athlon alloy giggle imminent suffers assumptions waltz ##itate accomplishments ##ited bathing remixed deception prefix ##emia deepest ##tier ##eis balkan frogs ##rong slab ##pate philosophers peterborough grains imports dickinson rwanda ##atics 1774 dirk lan tablets ##rove clone ##rice caretaker hostilities mclean ##gre regimental treasures norms impose tsar tango 
diplomacy variously complain 192 recognise arrests 1779 celestial pulitzer ##dus bing libretto ##moor adele splash ##rite expectation lds confronts ##izer spontaneous harmful wedge entrepreneurs buyer ##ope bilingual translate rugged conner circulated uae eaton ##gra ##zzle lingered lockheed vishnu reelection alonso ##oom joints yankee headline cooperate heinz laureate invading ##sford echoes scandinavian ##dham hugging vitamin salute micah hind trader ##sper radioactive ##ndra militants poisoned ratified remark campeonato deprived wander prop ##dong outlook ##tani ##rix ##eye chiang darcy ##oping mandolin spice statesman babylon 182 walled forgetting afro ##cap 158 giorgio buffer ##polis planetary ##gis overlap terminals kinda centenary ##bir arising manipulate elm ke 1770 ak ##tad chrysler mapped moose pomeranian quad macarthur assemblies shoreline recalls stratford ##rted noticeable ##evic imp ##rita ##sque accustomed supplying tents disgusted vogue sipped filters khz reno selecting luftwaffe mcmahon tyne masterpiece carriages collided dunes exercised flare remembers muzzle ##mobile heck ##rson burgess lunged middleton boycott bilateral ##sity hazardous lumpur multiplayer spotlight jackets goldman liege porcelain rag waterford benz attracts hopeful battling ottomans kensington baked hymns cheyenne lattice levine borrow polymer clashes michaels monitored commitments denounced ##25 ##von cavity ##oney hobby akin ##holders futures intricate cornish patty ##oned illegally dolphin ##lag barlow yellowish maddie apologized luton plagued ##puram nana ##rds sway fanny łodz ##rino psi suspicions hanged ##eding initiate charlton ##por nak competent 235 analytical annex wardrobe reservations ##rma sect 162 fairfax hedge piled buckingham uneven bauer simplicity snyder interpret accountability donors moderately byrd continents ##cite ##max disciple hr jamaican ping nominees ##uss mongolian diver attackers eagerly ideological pillows miracles apartheid revolver sulfur clinics moran 163 ##enko ile katy rhetoric ##icated chronology recycling ##hrer elongated mughal pascal profiles vibration databases domination ##fare ##rant matthias digest rehearsal polling weiss initiation reeves clinging flourished impress ngo ##hoff ##ume buckley symposium rhythms weed emphasize transforming ##taking ##gence ##yman accountant analyze flicker foil priesthood voluntarily decreases ##80 ##hya slater sv charting mcgill ##lde moreno ##iu besieged zur robes ##phic admitting api deported turmoil peyton earthquakes ##ares nationalists beau clair brethren interrupt welch curated galerie requesting 164 ##ested impending steward viper ##vina complaining beautifully brandy foam nl 1660 ##cake alessandro punches laced explanations ##lim attribute clit reggie discomfort ##cards smoothed whales ##cene adler countered duffy disciplinary widening recipe reliance conducts goats gradient preaching ##shaw matilda quasi striped meridian cannabis cordoba certificates ##agh ##tering graffiti hangs pilgrims repeats ##ych revive urine etat ##hawk fueled belts fuzzy susceptible ##hang mauritius salle sincere beers hooks ##cki arbitration entrusted advise sniffed seminar junk donnell processors principality strapped celia mendoza everton fortunes prejudice starving reassigned steamer ##lund tuck evenly foreman ##ffen dans 375 envisioned slit ##xy baseman liberia rosemary ##weed electrified periodically potassium stride contexts sperm slade mariners influx bianca subcommittee ##rane spilling icao estuary ##nock delivers iphone ##ulata isa mira 
bohemian dessert ##sbury welcoming proudly slowing ##chs musee ascension russ ##vian waits ##psy africans exploit ##morphic gov eccentric crab peck ##ull entrances formidable marketplace groom bolted metabolism patton robbins courier payload endure ##ifier andes refrigerator ##pr ornate ##uca ruthless illegitimate masonry strasbourg bikes adobe ##³ apples quintet willingly niche bakery corpses energetic ##cliffe ##sser ##ards 177 centimeters centro fuscous cretaceous rancho ##yde andrei telecom tottenham oasis ordination vulnerability presiding corey cp penguins sims ##pis malawi piss ##48 correction ##cked ##ffle ##ryn countdown detectives psychiatrist psychedelic dinosaurs blouse ##get choi vowed ##oz randomly ##pol 49ers scrub blanche bruins dusseldorf ##using unwanted ##ums 212 dominique elevations headlights om laguna ##oga 1750 famously ignorance shrewsbury ##aine ajax breuning che confederacy greco overhaul ##screen paz skirts disagreement cruelty jagged phoebe shifter hovered viruses ##wes mandy ##lined ##gc landlord squirrel dashed ##ι ornamental gag wally grange literal spurs undisclosed proceeding yin ##text billie orphan spanned humidity indy weighted presentations explosions lucian ##tary vaughn hindus ##anga ##hell psycho 171 daytona protects efficiently rematch sly tandem ##oya rebranded impaired hee metropolis peach godfrey diaspora ethnicity prosperous gleaming dar grossing playback ##rden stripe pistols ##tain births labelled ##cating 172 rudy alba ##onne aquarium hostility ##gb ##tase shudder sumatra hardest lakers consonant creeping demos homicide capsule zeke liberties expulsion pueblo ##comb trait transporting ##ddin ##neck ##yna depart gregg mold ledge hangar oldham playboy termination analysts gmbh romero ##itic insist cradle filthy brightness slash shootout deposed bordering ##truct isis microwave tumbled sheltered cathy werewolves messy andersen convex clapped clinched satire wasting edo vc rufus ##jak mont ##etti poznan ##keeping restructuring transverse ##rland azerbaijani slovene gestures roommate choking shear ##quist vanguard oblivious ##hiro disagreed baptism ##lich coliseum ##aceae salvage societe cory locke relocation relying versailles ahl swelling ##elo cheerful ##word ##edes gin sarajevo obstacle diverted ##nac messed thoroughbred fluttered utrecht chewed acquaintance assassins dispatch mirza ##wart nike salzburg swell yen ##gee idle ligue samson ##nds ##igh playful spawned ##cise tease ##case burgundy ##bot stirring skeptical interceptions marathi ##dies bedrooms aroused pinch ##lik preferences tattoos buster digitally projecting rust ##ital kitten priorities addison pseudo ##guard dusk icons sermon ##psis ##iba bt ##lift ##xt ju truce rink ##dah ##wy defects psychiatry offences calculate glucose ##iful ##rized ##unda francaise ##hari richest warwickshire carly 1763 purity redemption lending ##cious muse bruises cerebral aero carving ##name preface terminology invade monty ##int anarchist blurred ##iled rossi treats guts shu foothills ballads undertaking premise cecilia affiliates blasted conditional wilder minors drone rudolph buffy swallowing horton attested ##hop rutherford howell primetime livery penal ##bis minimize hydro wrecked wrought palazzo ##gling cans vernacular friedman nobleman shale walnut danielle ##ection ##tley sears ##kumar chords lend flipping streamed por dracula gallons sacrifices gamble orphanage ##iman mckenzie ##gible boxers daly ##balls ##ان 208 ##ific ##rative ##iq exploited slated ##uity circling hillary pinched goldberg 
provost campaigning lim piles ironically jong mohan successors usaf ##tem ##ught autobiographical haute preserves ##ending acquitted comparisons 203 hydroelectric gangs cypriot torpedoes rushes chrome derive bumps instability fiat pets ##mbe silas dye reckless settler ##itation info heats ##writing 176 canonical maltese fins mushroom stacy aspen avid ##kur ##loading vickers gaston hillside statutes wilde gail kung sabine comfortably motorcycles ##rgo 169 pneumonia fetch ##sonic axel faintly parallels ##oop mclaren spouse compton interdisciplinary miner ##eni 181 clamped ##chal ##llah separates versa ##mler scarborough labrador ##lity ##osing rutgers hurdles como 166 burt divers ##100 wichita cade coincided ##erson bruised mla ##pper vineyard ##ili ##brush notch mentioning jase hearted kits doe ##acle pomerania ##ady ronan seizure pavel problematic ##zaki domenico ##ulin catering penelope dependence parental emilio ministerial atkinson ##bolic clarkson chargers colby grill peeked arises summon ##aged fools ##grapher faculties qaeda ##vial garner refurbished ##hwa geelong disasters nudged bs shareholder lori algae reinstated rot ##ades ##nous invites stainless 183 inclusive ##itude diocesan til ##icz denomination ##xa benton floral registers ##ider ##erman ##kell absurd brunei guangzhou hitter retaliation ##uled ##eve blanc nh consistency contamination ##eres ##rner dire palermo broadcasters diaries inspire vols brewer tightening ky mixtape hormone ##tok stokes ##color ##dly ##ssi pg ##ometer ##lington sanitation ##tility intercontinental apps ##adt ¹⁄₂ cylinders economies favourable unison croix gertrude odyssey vanity dangling ##logists upgrades dice middleweight practitioner ##ight 206 henrik parlor orion angered lac python blurted ##rri sensual intends swings angled ##phs husky attain peerage precinct textiles cheltenham shuffled dai confess tasting bhutan ##riation tyrone segregation abrupt ruiz ##rish smirked blackwell confidential browning amounted ##put vase scarce fabulous raided staple guyana unemployed glider shay ##tow carmine troll intervene squash superstar ##uce cylindrical len roadway researched handy ##rium ##jana meta lao declares ##rring ##tadt ##elin ##kova willem shrubs napoleonic realms skater qi volkswagen ##ł tad hara archaeologist awkwardly eerie ##kind wiley ##heimer ##24 titus organizers cfl crusaders lama usb vent enraged thankful occupants maximilian ##gaard possessing textbooks ##oran collaborator quaker ##ulo avalanche mono silky straits isaiah mustang surged resolutions potomac descend cl kilograms plato strains saturdays ##olin bernstein ##ype holstein ponytail ##watch belize conversely heroine perpetual ##ylus charcoal piedmont glee negotiating backdrop prologue ##jah ##mmy pasadena climbs ramos sunni ##holm ##tner ##tri anand deficiency hertfordshire stout ##avi aperture orioles ##irs doncaster intrigued bombed coating otis ##mat cocktail ##jit ##eto amir arousal sar ##proof ##act ##ories dixie pots ##bow whereabouts 159 ##fted drains bullying cottages scripture coherent fore poe appetite ##uration sampled ##ators ##dp derrick rotor jays peacock installment ##rro advisors ##coming rodeo scotch ##mot ##db ##fen ##vant ensued rodrigo dictatorship martyrs twenties ##н towed incidence marta rainforest sai scaled ##cles oceanic qualifiers symphonic mcbride dislike generalized aubrey colonization ##iation ##lion ##ssing disliked lublin salesman ##ulates spherical whatsoever sweating avalon contention punt severity alderman atari ##dina ##grant ##rop scarf seville 
sparkled stat ##holz sweatshirt tobin ##iction sneered ##cheon ##oit brit causal smyth ##neuve diffuse perrin silvio ##ipes ##recht detonated iqbal selma ##nism ##zumi roasted ##riders tay ##ados ##mament ##mut ##rud 840 completes nipples cfa flavour hirsch ##laus calderon sneakers moravian ##ksha 1622 rq 294 ##imeters bodo ##isance ##pre ##ronia anatomical excerpt ##lke dh kunst ##tablished ##scoe biomass panted unharmed gael housemates montpellier ##59 coa rodents tonic hickory singleton ##taro 451 1719 aldo breaststroke dempsey och rocco ##cuit merton dissemination midsummer serials ##idi haji polynomials ##rdon gs enoch prematurely shutter taunton £3 ##grating ##inates archangel harassed ##asco 326 archway dazzling ##ecin 1736 sumo wat ##kovich 1086 honneur ##ently ##nostic ##ttal ##idon 1605 403 1716 blogger rents ##gnan hires ##ikh ##dant howie ##rons handler retracted shocks 1632 arun duluth kepler trumpeter ##lary peeking seasoned trooper ##mara laszlo ##iciencies ##rti heterosexual ##inatory ##ssion indira jogging ##inga ##lism beit dissatisfaction malice ##ately nedra peeling ##rgeon 47th stadiums 475 vertigo ##ains iced restroom ##plify ##tub illustrating pear ##chner ##sibility inorganic rappers receipts watery ##kura lucinda ##oulos reintroduced ##8th ##tched gracefully saxons nutritional wastewater rained favourites bedrock fisted hallways likeness upscale ##lateral 1580 blinds prequel ##pps ##tama deter humiliating restraining tn vents 1659 laundering recess rosary tractors coulter federer ##ifiers ##plin persistence ##quitable geschichte pendulum quakers ##beam bassett pictorial buffet koln ##sitor drills reciprocal shooters ##57 ##cton ##tees converge pip dmitri donnelly yamamoto aqua azores demographics hypnotic spitfire suspend wryly roderick ##rran sebastien ##asurable mavericks ##fles ##200 himalayan prodigy ##iance transvaal demonstrators handcuffs dodged mcnamara sublime 1726 crazed ##efined ##till ivo pondered reconciled shrill sava ##duk bal cad heresy jaipur goran ##nished 341 lux shelly whitehall ##hre israelis peacekeeping ##wled 1703 demetrius ousted ##arians ##zos beale anwar backstroke raged shrinking cremated ##yck benign towing wadi darmstadt landfill parana soothe colleen sidewalks mayfair tumble hepatitis ferrer superstructure ##gingly ##urse ##wee anthropological translators ##mies closeness hooves ##pw mondays ##roll ##vita landscaping ##urized purification sock thorns thwarted jalan tiberius ##taka saline ##rito confidently khyber sculptors ##ij brahms hammersmith inspectors battista fivb fragmentation hackney ##uls arresting exercising antoinette bedfordshire ##zily dyed ##hema 1656 racetrack variability ##tique 1655 austrians deteriorating madman theorists aix lehman weathered 1731 decreed eruptions 1729 flaw quinlan sorbonne flutes nunez 1711 adored downwards fable rasped 1712 moritz mouthful renegade shivers stunts dysfunction restrain translit 327 pancakes ##avio ##cision ##tray 351 vial ##lden bain ##maid ##oxide chihuahua malacca vimes ##rba ##rnier 1664 donnie plaques ##ually 337 bangs floppy huntsville loretta nikolay ##otte eater handgun ubiquitous ##hett eras zodiac 1634 ##omorphic 1820s ##zog cochran ##bula ##lithic warring ##rada dalai excused blazers mcconnell reeling bot este ##abi geese hoax taxon ##bla guitarists ##icon condemning hunts inversion moffat taekwondo ##lvis 1624 stammered ##rest ##rzy sousa fundraiser marylebone navigable uptown cabbage daniela salman shitty whimper ##kian ##utive programmers protections rm ##rmi ##rued 
forceful ##enes fuss ##tao ##wash brat oppressive reykjavik spartak ticking ##inkles ##kiewicz adolph horst maui protege straighten cpc landau concourse clements resultant ##ando imaginative joo reactivated ##rem ##ffled ##uising consultative ##guide flop kaitlyn mergers parenting somber ##vron supervise vidhan ##imum courtship exemplified harmonies medallist refining ##rrow ##ка amara ##hum 780 goalscorer sited overshadowed rohan displeasure secretive multiplied osman ##orth engravings padre ##kali ##veda miniatures mis ##yala clap pali rook ##cana 1692 57th antennae astro oskar 1628 bulldog crotch hackett yucatan ##sure amplifiers brno ferrara migrating ##gree thanking turing ##eza mccann ting andersson onslaught gaines ganga incense standardization ##mation sentai scuba stuffing turquoise waivers alloys ##vitt regaining vaults ##clops ##gizing digger furry memorabilia probing ##iad payton rec deutschland filippo opaque seamen zenith afrikaans ##filtration disciplined inspirational ##merie banco confuse grafton tod ##dgets championed simi anomaly biplane ##ceptive electrode ##para 1697 cleavage crossbow swirl informant ##lars ##osta afi bonfire spec ##oux lakeside slump ##culus ##lais ##qvist ##rrigan 1016 facades borg inwardly cervical xl pointedly 050 stabilization ##odon chests 1699 hacked ctv orthogonal suzy ##lastic gaulle jacobite rearview ##cam ##erted ashby ##drik ##igate ##mise ##zbek affectionately canine disperse latham ##istles ##ivar spielberg ##orin ##idium ezekiel cid ##sg durga middletown ##cina customized frontiers harden ##etano ##zzy 1604 bolsheviks ##66 coloration yoko ##bedo briefs slabs debra liquidation plumage ##oin blossoms dementia subsidy 1611 proctor relational jerseys parochial ter ##ici esa peshawar cavalier loren cpi idiots shamrock 1646 dutton malabar mustache ##endez ##ocytes referencing terminates marche yarmouth ##sop acton mated seton subtly baptised beige extremes jolted kristina telecast ##actic safeguard waldo ##baldi ##bular endeavors sloppy subterranean ##ensburg ##itung delicately pigment tq ##scu 1626 ##ound collisions coveted herds ##personal ##meister ##nberger chopra ##ricting abnormalities defective galician lucie ##dilly alligator likened ##genase burundi clears complexion derelict deafening diablo fingered champaign dogg enlist isotope labeling mrna ##erre brilliance marvelous ##ayo 1652 crawley ether footed dwellers deserts hamish rubs warlock skimmed ##lizer 870 buick embark heraldic irregularities ##ajan kiara ##kulam ##ieg antigen kowalski ##lge oakley visitation ##mbit vt ##suit 1570 murderers ##miento ##rites chimneys ##sling condemn custer exchequer havre ##ghi fluctuations ##rations dfb hendricks vaccines ##tarian nietzsche biking juicy ##duced brooding scrolling selangor ##ragan 352 annum boomed seminole sugarcane ##dna departmental dismissing innsbruck arteries ashok batavia daze kun overtook ##rga ##tlan beheaded gaddafi holm electronically faulty galilee fractures kobayashi ##lized gunmen magma aramaic mala eastenders inference messengers bf ##qu 407 bathrooms ##vere 1658 flashbacks ideally misunderstood ##jali ##weather mendez ##grounds 505 uncanny ##iii 1709 friendships ##nbc sacrament accommodated reiterated logistical pebbles thumped ##escence administering decrees drafts ##flight ##cased ##tula futuristic picket intimidation winthrop ##fahan interfered 339 afar francoise morally uta cochin croft dwarfs ##bruck ##dents ##nami biker ##hner ##meral nano ##isen ##ometric ##pres ##ан brightened meek parcels securely gunners 
##jhl ##zko agile hysteria ##lten ##rcus bukit champs chevy cuckoo leith sadler theologians welded ##section 1663 jj plurality xander ##rooms ##formed shredded temps intimately pau tormented ##lok ##stellar 1618 charred ems essen ##mmel alarms spraying ascot blooms twinkle ##abia ##apes internment obsidian ##chaft snoop ##dav ##ooping malibu ##tension quiver ##itia hays mcintosh travers walsall ##ffie 1623 beverley schwarz plunging structurally m3 rosenthal vikram ##tsk 770 ghz ##onda ##tiv chalmers groningen pew reckon unicef ##rvis 55th ##gni 1651 sulawesi avila cai metaphysical screwing turbulence ##mberg augusto samba 56th baffled momentary toxin ##urian ##wani aachen condoms dali steppe ##3d ##app ##oed ##year adolescence dauphin electrically inaccessible microscopy nikita ##ega atv ##cel ##enter ##oles ##oteric ##ы accountants punishments wrongly bribes adventurous clinch flinders southland ##hem ##kata gough ##ciency lads soared ##ה undergoes deformation outlawed rubbish ##arus ##mussen ##nidae ##rzburg arcs ##ingdon ##tituted 1695 wheelbase wheeling bombardier campground zebra ##lices ##oj ##bain lullaby ##ecure donetsk wylie grenada ##arding ##ης squinting eireann opposes ##andra maximal runes ##broken ##cuting ##iface ##ror ##rosis additive britney adultery triggering ##drome detrimental aarhus containment jc swapped vichy ##ioms madly ##oric ##rag brant ##ckey ##trix 1560 1612 broughton rustling ##stems ##uder asbestos mentoring ##nivorous finley leaps ##isan apical pry slits substitutes ##dict intuitive fantasia insistent unreasonable ##igen ##vna domed hannover margot ponder ##zziness impromptu jian lc rampage stemming ##eft andrey gerais whichever amnesia appropriated anzac clicks modifying ultimatum cambrian maids verve yellowstone ##mbs conservatoire ##scribe adherence dinners spectra imperfect mysteriously sidekick tatar tuba ##aks ##ifolia distrust ##athan ##zle c2 ronin zac ##pse celaena instrumentalist scents skopje ##mbling comical compensated vidal condor intersect jingle wavelengths ##urrent mcqueen ##izzly carp weasel 422 kanye militias postdoctoral eugen gunslinger ##ɛ faux hospice ##for appalled derivation dwarves ##elis dilapidated ##folk astoria philology ##lwyn ##otho ##saka inducing philanthropy ##bf ##itative geek markedly sql ##yce bessie indices rn ##flict 495 frowns resolving weightlifting tugs cleric contentious 1653 mania rms ##miya ##reate ##ruck ##tucket bien eels marek ##ayton ##cence discreet unofficially ##ife leaks ##bber 1705 332 dung compressor hillsborough pandit shillings distal ##skin 381 ##tat ##you nosed ##nir mangrove undeveloped ##idia textures ##inho ##500 ##rise ae irritating nay amazingly bancroft apologetic compassionate kata symphonies ##lovic airspace ##lch 930 gifford precautions fulfillment sevilla vulgar martinique ##urities looting piccolo tidy ##dermott quadrant armchair incomes mathematicians stampede nilsson ##inking ##scan foo quarterfinal ##ostal shang shouldered squirrels ##owe 344 vinegar ##bner ##rchy ##systems delaying ##trics ars dwyer rhapsody sponsoring ##gration bipolar cinder starters ##olio ##urst 421 signage ##nty aground figurative mons acquaintances duets erroneously soyuz elliptic recreated ##cultural ##quette ##ssed ##tma ##zcz moderator scares ##itaire ##stones ##udence juniper sighting ##just ##nsen britten calabria ry bop cramer forsyth stillness ##л airmen gathers unfit ##umber ##upt taunting ##rip seeker streamlined ##bution holster schumann tread vox ##gano ##onzo strive dil reforming covent newbury 
predicting ##orro decorate tre ##puted andover ie asahi dept dunkirk gills ##tori buren huskies ##stis ##stov abstracts bets loosen ##opa 1682 yearning ##glio ##sir berman effortlessly enamel napoli persist ##peration ##uez attache elisa b1 invitations ##kic accelerating reindeer boardwalk clutches nelly polka starbucks ##kei adamant huey lough unbroken adventurer embroidery inspecting stanza ##ducted naia taluka ##pone ##roids chases deprivation florian ##jing ##ppet earthly ##lib ##ssee colossal foreigner vet freaks patrice rosewood triassic upstate ##pkins dominates ata chants ks vo ##400 ##bley ##raya ##rmed 555 agra infiltrate ##ailing ##ilation ##tzer ##uppe ##werk binoculars enthusiast fujian squeak ##avs abolitionist almeida boredom hampstead marsden rations ##ands inflated 334 bonuses rosalie patna ##rco 329 detachments penitentiary 54th flourishing woolf ##dion ##etched papyrus ##lster ##nsor ##toy bobbed dismounted endelle inhuman motorola tbs wince wreath ##ticus hideout inspections sanjay disgrace infused pudding stalks ##urbed arsenic leases ##hyl ##rrard collarbone ##waite ##wil dowry ##bant ##edance genealogical nitrate salamanca scandals thyroid necessitated ##! ##" ### ##$ ##% ##& ##' ##( ##) ##* ##+ ##, ##- ##. ##/ ##: ##; ##< ##= ##> ##? ##@ ##[ ##\ ##] ##^ ##_ ##` ##{ ##| ##} ##~ ##¡ ##¢ ##£ ##¤ ##¥ ##¦ ##§ ##¨ ##© ##ª ##« ##¬ ##® ##± ##´ ##µ ##¶ ##· ##º ##» ##¼ ##¾ ##¿ ##æ ##ð ##÷ ##þ ##đ ##ħ ##ŋ ##œ ##ƒ ##ɐ ##ɑ ##ɒ ##ɔ ##ɕ ##ə ##ɡ ##ɣ ##ɨ ##ɪ ##ɫ ##ɬ ##ɯ ##ɲ ##ɴ ##ɹ ##ɾ ##ʀ ##ʁ ##ʂ ##ʃ ##ʉ ##ʊ ##ʋ ##ʌ ##ʎ ##ʐ ##ʑ ##ʒ ##ʔ ##ʰ ##ʲ ##ʳ ##ʷ ##ʸ ##ʻ ##ʼ ##ʾ ##ʿ ##ˈ ##ˡ ##ˢ ##ˣ ##ˤ ##β ##γ ##δ ##ε ##ζ ##θ ##κ ##λ ##μ ##ξ ##ο ##π ##ρ ##σ ##τ ##υ ##φ ##χ ##ψ ##ω ##б ##г ##д ##ж ##з ##м ##п ##с ##у ##ф ##х ##ц ##ч ##ш ##щ ##ъ ##э ##ю ##ђ ##є ##і ##ј ##љ ##њ ##ћ ##ӏ ##ա ##բ ##գ ##դ ##ե ##թ ##ի ##լ ##կ ##հ ##մ ##յ ##ն ##ո ##պ ##ս ##վ ##տ ##ր ##ւ ##ք ##־ ##א ##ב ##ג ##ד ##ו ##ז ##ח ##ט ##י ##ך ##כ ##ל ##ם ##מ ##ן ##נ ##ס ##ע ##ף ##פ ##ץ ##צ ##ק ##ר ##ש ##ת ##، ##ء ##ب ##ت ##ث ##ج ##ح ##خ ##ذ ##ز ##س ##ش ##ص ##ض ##ط ##ظ ##ع ##غ ##ـ ##ف ##ق ##ك ##و ##ى ##ٹ ##پ ##چ ##ک ##گ ##ں ##ھ ##ہ ##ے ##अ ##आ ##उ ##ए ##क ##ख ##ग ##च ##ज ##ट ##ड ##ण ##त ##थ ##द ##ध ##न ##प ##ब ##भ ##म ##य ##र ##ल ##व ##श ##ष ##स ##ह ##ा ##ि ##ी ##ो ##। ##॥ ##ং ##অ ##আ ##ই ##উ ##এ ##ও ##ক ##খ ##গ ##চ ##ছ ##জ ##ট ##ড ##ণ ##ত ##থ ##দ ##ধ ##ন ##প ##ব ##ভ ##ম ##য ##র ##ল ##শ ##ষ ##স ##হ ##া ##ি ##ী ##ে ##க ##ச ##ட ##த ##ந ##ன ##ப ##ம ##ய ##ர ##ல ##ள ##வ ##ா ##ி ##ு ##ே ##ை ##ನ ##ರ ##ಾ ##ක ##ය ##ර ##ල ##ව ##ා ##ก ##ง ##ต ##ท ##น ##พ ##ม ##ย ##ร ##ล ##ว ##ส ##อ ##า ##เ ##་ ##། ##ག ##ང ##ད ##ན ##པ ##བ ##མ ##འ ##ར ##ལ ##ས ##မ ##ა ##ბ ##გ ##დ ##ე ##ვ ##თ ##ი ##კ ##ლ ##მ ##ნ ##ო ##რ ##ს ##ტ ##უ ##ᄀ ##ᄂ ##ᄃ ##ᄅ ##ᄆ ##ᄇ ##ᄉ ##ᄊ ##ᄋ ##ᄌ ##ᄎ ##ᄏ ##ᄐ ##ᄑ ##ᄒ ##ᅡ ##ᅢ ##ᅥ ##ᅦ ##ᅧ ##ᅩ ##ᅪ ##ᅭ ##ᅮ ##ᅯ ##ᅲ ##ᅳ ##ᅴ ##ᅵ ##ᆨ ##ᆫ ##ᆯ ##ᆷ ##ᆸ ##ᆼ ##ᴬ ##ᴮ ##ᴰ ##ᴵ ##ᴺ ##ᵀ ##ᵃ ##ᵇ ##ᵈ ##ᵉ ##ᵍ ##ᵏ ##ᵐ ##ᵒ ##ᵖ ##ᵗ ##ᵘ ##ᵣ ##ᵤ ##ᵥ ##ᶜ ##ᶠ ##‐ ##‑ ##‒ ##– ##— ##― ##‖ ##‘ ##’ ##‚ ##“ ##” ##„ ##† ##‡ ##• ##… ##‰ ##′ ##″ ##› ##‿ ##⁄ ##⁰ ##ⁱ ##⁴ ##⁵ ##⁶ ##⁷ ##⁸ ##⁹ ##⁻ ##ⁿ ##₅ ##₆ ##₇ ##₈ ##₉ ##₊ ##₍ ##₎ ##ₐ ##ₑ ##ₒ ##ₓ ##ₕ ##ₖ ##ₗ ##ₘ ##ₚ ##ₛ ##ₜ ##₤ ##₩ ##€ ##₱ ##₹ ##ℓ ##№ ##ℝ ##™ ##⅓ ##⅔ ##← ##↑ ##→ ##↓ ##↔ ##↦ ##⇄ ##⇌ ##⇒ ##∂ ##∅ ##∆ ##∇ ##∈ ##∗ ##∘ ##√ ##∞ ##∧ ##∨ ##∩ ##∪ ##≈ ##≡ ##≤ ##≥ ##⊂ ##⊆ ##⊕ ##⊗ ##⋅ ##─ ##│ ##■ ##▪ ##● ##★ ##☆ ##☉ ##♠ ##♣ ##♥ ##♦ ##♯ ##⟨ ##⟩ ##ⱼ ##⺩ ##⺼ ##⽥ ##、 ##。 ##〈 ##〉 ##《 ##》 ##「 ##」 ##『 ##』 ##〜 ##あ ##い ##う ##え ##お ##か ##き ##く ##け ##こ ##さ ##し ##す ##せ ##そ ##た ##ち ##っ ##つ ##て ##と ##な ##に ##ぬ ##ね ##の ##は ##ひ ##ふ ##へ 
##ほ ##ま ##み ##む ##め ##も ##や ##ゆ ##よ ##ら ##り ##る ##れ ##ろ ##を ##ん ##ァ ##ア ##ィ ##イ ##ウ ##ェ ##エ ##オ ##カ ##キ ##ク ##ケ ##コ ##サ ##シ ##ス ##セ ##タ ##チ ##ッ ##ツ ##テ ##ト ##ナ ##ニ ##ノ ##ハ ##ヒ ##フ ##ヘ ##ホ ##マ ##ミ ##ム ##メ ##モ ##ャ ##ュ ##ョ ##ラ ##リ ##ル ##レ ##ロ ##ワ ##ン ##・ ##ー ##一 ##三 ##上 ##下 ##不 ##世 ##中 ##主 ##久 ##之 ##也 ##事 ##二 ##五 ##井 ##京 ##人 ##亻 ##仁 ##介 ##代 ##仮 ##伊 ##会 ##佐 ##侍 ##保 ##信 ##健 ##元 ##光 ##八 ##公 ##内 ##出 ##分 ##前 ##劉 ##力 ##加 ##勝 ##北 ##区 ##十 ##千 ##南 ##博 ##原 ##口 ##古 ##史 ##司 ##合 ##吉 ##同 ##名 ##和 ##囗 ##四 ##国 ##國 ##土 ##地 ##坂 ##城 ##堂 ##場 ##士 ##夏 ##外 ##大 ##天 ##太 ##夫 ##奈 ##女 ##子 ##学 ##宀 ##宇 ##安 ##宗 ##定 ##宣 ##宮 ##家 ##宿 ##寺 ##將 ##小 ##尚 ##山 ##岡 ##島 ##崎 ##川 ##州 ##巿 ##帝 ##平 ##年 ##幸 ##广 ##弘 ##張 ##彳 ##後 ##御 ##德 ##心 ##忄 ##志 ##忠 ##愛 ##成 ##我 ##戦 ##戸 ##手 ##扌 ##政 ##文 ##新 ##方 ##日 ##明 ##星 ##春 ##昭 ##智 ##曲 ##書 ##月 ##有 ##朝 ##木 ##本 ##李 ##村 ##東 ##松 ##林 ##森 ##楊 ##樹 ##橋 ##歌 ##止 ##正 ##武 ##比 ##氏 ##民 ##水 ##氵 ##氷 ##永 ##江 ##沢 ##河 ##治 ##法 ##海 ##清 ##漢 ##瀬 ##火 ##版 ##犬 ##王 ##生 ##田 ##男 ##疒 ##発 ##白 ##的 ##皇 ##目 ##相 ##省 ##真 ##石 ##示 ##社 ##神 ##福 ##禾 ##秀 ##秋 ##空 ##立 ##章 ##竹 ##糹 ##美 ##義 ##耳 ##良 ##艹 ##花 ##英 ##華 ##葉 ##藤 ##行 ##街 ##西 ##見 ##訁 ##語 ##谷 ##貝 ##貴 ##車 ##軍 ##辶 ##道 ##郎 ##郡 ##部 ##都 ##里 ##野 ##金 ##鈴 ##镇 ##長 ##門 ##間 ##阝 ##阿 ##陳 ##陽 ##雄 ##青 ##面 ##風 ##食 ##香 ##馬 ##高 ##龍 ##龸 ##fi ##fl ##! ##( ##) ##, ##- ##. ##/ ##: ##? ##~
TensorFlow2/Segmentation/MaskRCNN
MaskRCNN
requirements
opencv-python-headless git+https://github.com/NVIDIA/cocoapi#egg=pycocotools&subdirectory=PythonAPI git+https://github.com/NVIDIA/dllogger
PyTorch/SpeechSynthesis/HiFiGAN/platform
platform
DGX1_HiFi-GAN_FP32_8GPU
#!/bin/bash set -a : ${NUM_GPUS:=8} : ${BATCH_SIZE:=16} : ${GRAD_ACCUMULATION:=1} : ${AMP:=false} bash scripts/train_lj22khz.sh "$@"
TensorFlow2/Classification/ConvNets/efficientnet_v2/S/training/TF32
TF32
convergence_1xA100-80G
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python3 main.py \ --cfg config/efficientnet_v2/s_cfg.py \ --mode train_and_eval \ --use_xla \ --model_dir ./output/ \ --data_dir /data/ \ --log_steps 500 \ --save_checkpoint_freq 10 \ --n_stages 4 \ --max_epochs 350 \ --train_batch_size 230 \ --train_img_size 300 \ --base_img_size 128 \ --lr_decay cosine \ --lr_init 0.005 \ --weight_decay .000005 \ --opt_epsilon 0.001 \ --moving_average_decay 0.9999 \ --eval_img_size 384 \ --eval_batch_size 100 \ --augmenter_name randaugment \ --raug_num_layers 2 \ --raug_magnitude 15 \ --cutmix_alpha 0 \ --mixup_alpha 0 \ --defer_img_mixing
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/config
config
defaults
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import os from yacs.config import CfgNode as CN # ----------------------------------------------------------------------------- # Convention about Training / Test specific parameters # ----------------------------------------------------------------------------- # Whenever an argument can be either used for training or for testing, the # corresponding name will be post-fixed by a _TRAIN for a training parameter, # or _TEST for a test-specific parameter. # For example, the number of images during training will be # IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be # IMAGES_PER_BATCH_TEST # ----------------------------------------------------------------------------- # Config definition # ----------------------------------------------------------------------------- _C = CN() _C.MODEL = CN() _C.MODEL.RPN_ONLY = False _C.MODEL.MASK_ON = False _C.MODEL.DEVICE = "cuda" _C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN" # If the WEIGHT starts with a catalog://, like :R-50, the code will look for # the path in paths_catalog. Else, it will use it as the specified absolute # path _C.MODEL.WEIGHT = "" # ----------------------------------------------------------------------------- # INPUT # ----------------------------------------------------------------------------- _C.INPUT = CN() # Size of the smallest side of the image during training _C.INPUT.MIN_SIZE_TRAIN = 800 # (800,) # Maximum size of the side of the image during training _C.INPUT.MAX_SIZE_TRAIN = 1333 # Size of the smallest side of the image during testing _C.INPUT.MIN_SIZE_TEST = 800 # Maximum size of the side of the image during testing _C.INPUT.MAX_SIZE_TEST = 1333 # Values to be used for image normalization _C.INPUT.PIXEL_MEAN = [102.9801, 115.9465, 122.7717] # Values to be used for image normalization _C.INPUT.PIXEL_STD = [1., 1., 1.] # Convert image to BGR format (for Caffe2 models), in range 0-255 _C.INPUT.TO_BGR255 = True # ----------------------------------------------------------------------------- # Dataset # ----------------------------------------------------------------------------- _C.DATASETS = CN() # List of the dataset names for training, as present in paths_catalog.py _C.DATASETS.TRAIN = () # List of the dataset names for testing, as present in paths_catalog.py _C.DATASETS.TEST = () # ----------------------------------------------------------------------------- # DataLoader # ----------------------------------------------------------------------------- _C.DATALOADER = CN() # Number of data loading threads _C.DATALOADER.NUM_WORKERS = 4 # If > 0, this enforces that each collated batch should have a size divisible # by SIZE_DIVISIBILITY _C.DATALOADER.SIZE_DIVISIBILITY = 0 # If True, each batch should contain only images for which the aspect ratio # is compatible. This groups portrait images together, and landscape images # are not batched with portrait images. _C.DATALOADER.ASPECT_RATIO_GROUPING = True # If True, the custom Hybrid Dataloader is used. # If not, torch.utils.data.DataLoader is used for dataloading. 
_C.DATALOADER.HYBRID = True # ---------------------------------------------------------------------------- # # Backbone options # ---------------------------------------------------------------------------- # _C.MODEL.BACKBONE = CN() # The backbone conv body to use # The string must match a function that is imported in modeling.model_builder # (e.g., 'FPN.add_fpn_ResNet101_conv5_body' to specify a ResNet-101-FPN # backbone) _C.MODEL.BACKBONE.CONV_BODY = "R-50-C4" # Add StopGrad at a specified stage so the bottom layers are frozen _C.MODEL.BACKBONE.FREEZE_CONV_BODY_AT = 2 _C.MODEL.BACKBONE.OUT_CHANNELS = 256 * 4 # GN for backbone _C.MODEL.BACKBONE.USE_GN = False # ---------------------------------------------------------------------------- # # FPN options # ---------------------------------------------------------------------------- # _C.MODEL.FPN = CN() _C.MODEL.FPN.USE_GN = False _C.MODEL.FPN.USE_RELU = False # ---------------------------------------------------------------------------- # # Group Norm options # ---------------------------------------------------------------------------- # _C.MODEL.GROUP_NORM = CN() # Number of dimensions per group in GroupNorm (-1 if using NUM_GROUPS) _C.MODEL.GROUP_NORM.DIM_PER_GP = -1 # Number of groups in GroupNorm (-1 if using DIM_PER_GP) _C.MODEL.GROUP_NORM.NUM_GROUPS = 32 # GroupNorm's small constant in the denominator _C.MODEL.GROUP_NORM.EPSILON = 1e-5 # ---------------------------------------------------------------------------- # # RPN options # ---------------------------------------------------------------------------- # _C.MODEL.RPN = CN() _C.MODEL.RPN.USE_FPN = False # Base RPN anchor sizes given in absolute pixels w.r.t. the scaled network input _C.MODEL.RPN.ANCHOR_SIZES = (32, 64, 128, 256, 512) # Stride of the feature map that RPN is attached. # For FPN, number of strides should match number of scales _C.MODEL.RPN.ANCHOR_STRIDE = (16,) # RPN anchor aspect ratios _C.MODEL.RPN.ASPECT_RATIOS = (0.5, 1.0, 2.0) # Remove RPN anchors that go outside the image by RPN_STRADDLE_THRESH pixels # Set to -1 or a large value, e.g. 
100000, to disable pruning anchors _C.MODEL.RPN.STRADDLE_THRESH = 0 # Minimum overlap required between an anchor and ground-truth box for the # (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD # ==> positive RPN example) _C.MODEL.RPN.FG_IOU_THRESHOLD = 0.7 # Maximum overlap allowed between an anchor and ground-truth box for the # (anchor, gt box) pair to be a negative examples (IoU < BG_IOU_THRESHOLD # ==> negative RPN example) _C.MODEL.RPN.BG_IOU_THRESHOLD = 0.3 # Total number of RPN examples per image _C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256 # Target fraction of foreground (positive) examples per RPN minibatch _C.MODEL.RPN.POSITIVE_FRACTION = 0.5 # Number of top scoring RPN proposals to keep before applying NMS # When FPN is used, this is *per FPN level* (not total) _C.MODEL.RPN.PRE_NMS_TOP_N_TRAIN = 12000 _C.MODEL.RPN.PRE_NMS_TOP_N_TEST = 6000 # Number of top scoring RPN proposals to keep after applying NMS _C.MODEL.RPN.POST_NMS_TOP_N_TRAIN = 2000 _C.MODEL.RPN.POST_NMS_TOP_N_TEST = 1000 # NMS threshold used on RPN proposals _C.MODEL.RPN.NMS_THRESH = 0.7 # Proposal height and width both need to be greater than RPN_MIN_SIZE # (a the scale used during training or inference) _C.MODEL.RPN.MIN_SIZE = 0 # Number of top scoring RPN proposals to keep after combining proposals from # all FPN levels _C.MODEL.RPN.FPN_POST_NMS_TOP_N_TRAIN = 2000 _C.MODEL.RPN.FPN_POST_NMS_TOP_N_TEST = 2000 # Custom rpn head, empty to use default conv or separable conv _C.MODEL.RPN.RPN_HEAD = "SingleConvRPNHead" # ---------------------------------------------------------------------------- # # ROI HEADS options # ---------------------------------------------------------------------------- # _C.MODEL.ROI_HEADS = CN() _C.MODEL.ROI_HEADS.USE_FPN = False # Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD) _C.MODEL.ROI_HEADS.FG_IOU_THRESHOLD = 0.5 # Overlap threshold for an RoI to be considered background # (class = 0 if overlap in [0, BG_IOU_THRESHOLD)) _C.MODEL.ROI_HEADS.BG_IOU_THRESHOLD = 0.5 # Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets # These are empirically chosen to approximately lead to unit variance targets _C.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS = (10., 10., 5., 5.) # RoI minibatch size *per image* (number of regions of interest [ROIs]) # Total number of RoIs per training minibatch = # TRAIN.BATCH_SIZE_PER_IM * TRAIN.IMS_PER_BATCH * NUM_GPUS # E.g., a common configuration is: 512 * 2 * 8 = 8192 _C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 # Target fraction of RoI minibatch that is labeled foreground (i.e. 
class > 0) _C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25 # Only used on test mode # Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to # balance obtaining high recall with not having too many low precision # detections that will slow down inference post processing steps (like NMS) _C.MODEL.ROI_HEADS.SCORE_THRESH = 0.05 # Overlap threshold used for non-maximum suppression (suppress boxes with # IoU >= this threshold) _C.MODEL.ROI_HEADS.NMS = 0.5 # Maximum number of detections to return per image (100 is based on the limit # established for the COCO dataset) _C.MODEL.ROI_HEADS.DETECTIONS_PER_IMG = 100 _C.MODEL.ROI_BOX_HEAD = CN() _C.MODEL.ROI_BOX_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor" _C.MODEL.ROI_BOX_HEAD.PREDICTOR = "FastRCNNPredictor" _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_BOX_HEAD.POOLER_SCALES = (1.0 / 16,) _C.MODEL.ROI_BOX_HEAD.NUM_CLASSES = 81 # Hidden layer dimension when using an MLP for the RoI box head _C.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM = 1024 # GN _C.MODEL.ROI_BOX_HEAD.USE_GN = False # Dilation _C.MODEL.ROI_BOX_HEAD.DILATION = 1 _C.MODEL.ROI_BOX_HEAD.CONV_HEAD_DIM = 256 _C.MODEL.ROI_BOX_HEAD.NUM_STACKED_CONVS = 4 _C.MODEL.ROI_MASK_HEAD = CN() _C.MODEL.ROI_MASK_HEAD.FEATURE_EXTRACTOR = "ResNet50Conv5ROIFeatureExtractor" _C.MODEL.ROI_MASK_HEAD.PREDICTOR = "MaskRCNNC4Predictor" _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14 _C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0 _C.MODEL.ROI_MASK_HEAD.POOLER_SCALES = (1.0 / 16,) _C.MODEL.ROI_MASK_HEAD.MLP_HEAD_DIM = 1024 _C.MODEL.ROI_MASK_HEAD.CONV_LAYERS = (256, 256, 256, 256) _C.MODEL.ROI_MASK_HEAD.RESOLUTION = 14 _C.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR = True # Whether or not resize and translate masks to the input image. 
_C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS = False _C.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD = 0.5 # Dilation _C.MODEL.ROI_MASK_HEAD.DILATION = 1 # GN _C.MODEL.ROI_MASK_HEAD.USE_GN = False # ---------------------------------------------------------------------------- # # ResNe[X]t options (ResNets = {ResNet, ResNeXt} # Note that parts of a resnet may be used for both the backbone and the head # These options apply to both # ---------------------------------------------------------------------------- # _C.MODEL.RESNETS = CN() # Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt _C.MODEL.RESNETS.NUM_GROUPS = 1 # Baseline width of each group _C.MODEL.RESNETS.WIDTH_PER_GROUP = 64 # Place the stride 2 conv on the 1x1 filter # Use True only for the original MSRA ResNet; use False for C2 and Torch models _C.MODEL.RESNETS.STRIDE_IN_1X1 = True # Residual transformation function _C.MODEL.RESNETS.TRANS_FUNC = "BottleneckWithFixedBatchNorm" # ResNet's stem function (conv1 and pool1) _C.MODEL.RESNETS.STEM_FUNC = "StemWithFixedBatchNorm" # Apply dilation in stage "res5" _C.MODEL.RESNETS.RES5_DILATION = 1 _C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256 _C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64 # ---------------------------------------------------------------------------- # # Solver # ---------------------------------------------------------------------------- # _C.SOLVER = CN() _C.SOLVER.MAX_ITER = 40000 _C.SOLVER.BASE_LR = 0.001 _C.SOLVER.BIAS_LR_FACTOR = 2 _C.SOLVER.MOMENTUM = 0.9 _C.SOLVER.WEIGHT_DECAY = 0.0005 _C.SOLVER.WEIGHT_DECAY_BIAS = 0 _C.SOLVER.GAMMA = 0.1 _C.SOLVER.STEPS = (30000,) _C.SOLVER.WARMUP_FACTOR = 1.0 / 3 _C.SOLVER.WARMUP_ITERS = 500 _C.SOLVER.WARMUP_METHOD = "linear" _C.SOLVER.CHECKPOINT_PERIOD = 2500 # Number of images per batch # This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will # see 2 images per batch _C.SOLVER.IMS_PER_BATCH = 16 # Parameters to accumulate gradient across steps _C.SOLVER.ACCUMULATE_STEPS = 1 _C.SOLVER.ACCUMULATE_GRAD = False # ---------------------------------------------------------------------------- # # Specific test options # ---------------------------------------------------------------------------- # _C.TEST = CN() _C.TEST.EXPECTED_RESULTS = [] _C.TEST.EXPECTED_RESULTS_SIGMA_TOL = 4 # Number of images per batch # This is global, so if we have 8 GPUs and IMS_PER_BATCH = 16, each GPU will # see 2 images per batch _C.TEST.IMS_PER_BATCH = 8 # ---------------------------------------------------------------------------- # # Misc options # ---------------------------------------------------------------------------- # _C.OUTPUT_DIR = "." _C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog.py") _C.USE_TORCH_DDP = True # ---------------------------------------------------------------------------- # # Precision options # ---------------------------------------------------------------------------- # # Precision of input, allowable: (float32, float16) _C.DTYPE = "float32" # Use NATIVE NHWC/NCHW for eligible convolutions _C.NHWC = True # Evaluate every epoch _C.PER_EPOCH_EVAL = False _C.MIN_BBOX_MAP = 0.377 _C.MIN_MASK_MAP = 0.342 _C.SAVE_CHECKPOINT = True
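The defaults above are meant to be cloned and overridden rather than edited in place. A minimal sketch of how a training script might consume this module with the yacs API follows; the YAML path and override values are hypothetical examples, not files shipped with the repository.

# Assumed usage sketch: clone the defaults defined above and override
# selected keys with yacs. Paths and values below are hypothetical.
from maskrcnn_benchmark.config.defaults import _C

cfg = _C.clone()                                      # start from the defaults
# cfg.merge_from_file("configs/my_experiment.yaml")   # hypothetical YAML override
cfg.merge_from_list(["SOLVER.IMS_PER_BATCH", 8,       # command-line style overrides
                     "DTYPE", "float16"])
cfg.freeze()                                          # make the config immutable
print(cfg.MODEL.META_ARCHITECTURE, cfg.SOLVER.BASE_LR)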
PyTorch/Classification/ConvNets/image_classification
image_classification
smoothing
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the BSD 3-Clause License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn class LabelSmoothing(nn.Module): """ NLL loss with label smoothing. """ def __init__(self, smoothing=0.0): """ Constructor for the LabelSmoothing module. :param smoothing: label smoothing factor """ super(LabelSmoothing, self).__init__() self.confidence = 1.0 - smoothing self.smoothing = smoothing def forward(self, x, target): logprobs = torch.nn.functional.log_softmax(x, dim=-1) nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) nll_loss = nll_loss.squeeze(1) smooth_loss = -logprobs.mean(dim=-1) loss = self.confidence * nll_loss + self.smoothing * smooth_loss return loss.mean()
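As a quick illustration of how the module above behaves, the short sketch below (not part of the original file) applies it to random logits; with smoothing set to 0.0 it reduces to plain cross-entropy.

# Illustrative smoke test (assumption: used like any criterion on raw logits).
import torch
import torch.nn.functional as F

criterion = LabelSmoothing(smoothing=0.1)
logits = torch.randn(8, 1000)             # batch of 8, 1000 classes
target = torch.randint(0, 1000, (8,))
loss = criterion(logits, target)
# With smoothing=0.0 the result matches standard cross-entropy:
baseline = LabelSmoothing(smoothing=0.0)(logits, target)
assert torch.allclose(baseline, F.cross_entropy(logits, target), atol=1e-6)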
TensorFlow/Detection/SSD/models/research/object_detection/samples/configs
configs
faster_rcnn_resnet152_coco
# Faster R-CNN with Resnet-152 (v1), configuration for MSCOCO Dataset. # Users should configure the fine_tune_checkpoint field in the train config as # well as the label_map_path and input_path fields in the train_input_reader and # eval_input_reader. Search for "PATH_TO_BE_CONFIGURED" to find the fields that # should be configured. model { faster_rcnn { num_classes: 90 image_resizer { keep_aspect_ratio_resizer { min_dimension: 600 max_dimension: 1024 } } feature_extractor { type: 'faster_rcnn_resnet152' first_stage_features_stride: 16 } first_stage_anchor_generator { grid_anchor_generator { scales: [0.25, 0.5, 1.0, 2.0] aspect_ratios: [0.5, 1.0, 2.0] height_stride: 16 width_stride: 16 } } first_stage_box_predictor_conv_hyperparams { op: CONV regularizer { l2_regularizer { weight: 0.0 } } initializer { truncated_normal_initializer { stddev: 0.01 } } } first_stage_nms_score_threshold: 0.0 first_stage_nms_iou_threshold: 0.7 first_stage_max_proposals: 300 first_stage_localization_loss_weight: 2.0 first_stage_objectness_loss_weight: 1.0 initial_crop_size: 14 maxpool_kernel_size: 2 maxpool_stride: 2 second_stage_box_predictor { mask_rcnn_box_predictor { use_dropout: false dropout_keep_probability: 1.0 fc_hyperparams { op: FC regularizer { l2_regularizer { weight: 0.0 } } initializer { variance_scaling_initializer { factor: 1.0 uniform: true mode: FAN_AVG } } } } } second_stage_post_processing { batch_non_max_suppression { score_threshold: 0.0 iou_threshold: 0.6 max_detections_per_class: 100 max_total_detections: 300 } score_converter: SOFTMAX } second_stage_localization_loss_weight: 2.0 second_stage_classification_loss_weight: 1.0 } } train_config: { batch_size: 1 optimizer { momentum_optimizer: { learning_rate: { manual_step_learning_rate { initial_learning_rate: 0.0003 schedule { step: 900000 learning_rate: .00003 } schedule { step: 1200000 learning_rate: .000003 } } } momentum_optimizer_value: 0.9 } use_moving_average: false } gradient_clipping_by_norm: 10.0 fine_tune_checkpoint: "PATH_TO_BE_CONFIGURED/model.ckpt" from_detection_checkpoint: true # Note: The below line limits the training process to 200K steps, which we # empirically found to be sufficient enough to train the pets dataset. This # effectively bypasses the learning rate schedule (the learning rate will # never decay). Remove the below line to train indefinitely. num_steps: 200000 data_augmentation_options { random_horizontal_flip { } } } train_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_train.record-?????-of-00100" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" } eval_config: { num_examples: 8000 # Note: The below line limits the evaluation process to 10 evaluations. # Remove the below line to evaluate indefinitely. max_evals: 10 } eval_input_reader: { tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/mscoco_val.record-?????-of-00010" } label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt" shuffle: false num_readers: 1 }
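A pipeline config like the one above is normally parsed with the TF Object Detection API's protobuf definitions. The sketch below shows one assumed way to load it and fill in the PATH_TO_BE_CONFIGURED fields programmatically; the local file names and paths are hypothetical.

# Assumed usage sketch: parse the text-format pipeline config with the
# object_detection protos and patch the placeholder paths.
from google.protobuf import text_format
from object_detection.protos import pipeline_pb2

config = pipeline_pb2.TrainEvalPipelineConfig()
with open("faster_rcnn_resnet152_coco.config") as f:     # hypothetical local copy
    text_format.Merge(f.read(), config)

config.train_config.fine_tune_checkpoint = "/checkpoints/model.ckpt"        # example path
config.train_input_reader.tf_record_input_reader.input_path[0] = \
    "/data/coco/mscoco_train.record-?????-of-00100"                         # example path

with open("faster_rcnn_resnet152_coco_local.config", "w") as f:
    f.write(text_format.MessageToString(config))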
PyTorch/Detection/Efficientdet/effdet
effdet
efficientnet
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from collections import namedtuple import torch from torch import nn BlockParameters = namedtuple('BlockParameters', ['kernel_size', 'stride', 'num_repeat', 'in_channels', 'out_channels', 'expand_ratio']) GlobalParameters = namedtuple('GlobalParameters', ['squeeze_excitation_ratio', 'batchnorm_momentum', 'batchnorm_epsilon', 'stochastic_depth_survival_prob', 'feature_channels', "weights_init_mode"]) efficientnet_configs = { "fanin": GlobalParameters( squeeze_excitation_ratio=0.25, batchnorm_momentum=1-0.99, # batchnorm momentum definition is different in pytorch and original paper batchnorm_epsilon=1e-3, stochastic_depth_survival_prob=0.8, feature_channels=1280, weights_init_mode="fan_in" ), "fanout": GlobalParameters( squeeze_excitation_ratio=0.25, batchnorm_momentum=1-0.99, batchnorm_epsilon=1e-3, stochastic_depth_survival_prob=0.8, feature_channels=1280, weights_init_mode="fan_out" ), } BASE_EFFICIENTNET_BLOCKS_CONFIG = [ BlockParameters(kernel_size=3, stride=1, num_repeat=1, in_channels=32, out_channels=16, expand_ratio=1), BlockParameters(kernel_size=3, stride=2, num_repeat=2, in_channels=16, out_channels=24, expand_ratio=6), BlockParameters(kernel_size=5, stride=2, num_repeat=2, in_channels=24, out_channels=40, expand_ratio=6), BlockParameters(kernel_size=3, stride=2, num_repeat=3, in_channels=40, out_channels=80, expand_ratio=6), BlockParameters(kernel_size=5, stride=1, num_repeat=3, in_channels=80, out_channels=112, expand_ratio=6), BlockParameters(kernel_size=5, stride=2, num_repeat=4, in_channels=112, out_channels=192, expand_ratio=6), BlockParameters(kernel_size=3, stride=1, num_repeat=1, in_channels=192, out_channels=320, expand_ratio=6) ] def _scale_width(num_channels, width_coeff, divisor=8): num_channels *= width_coeff # Rounding should not go down by more than 10% rounded_num_channels = max(divisor, int(num_channels + divisor / 2) // divisor * divisor) if rounded_num_channels < 0.9 * num_channels: rounded_num_channels += divisor return rounded_num_channels def scaled_efficientnet_config(width_coeff, depth_coeff): config = [ block._replace( num_repeat=int(math.ceil(block.num_repeat * depth_coeff)), in_channels=_scale_width(block.in_channels, width_coeff), out_channels=_scale_width(block.out_channels, width_coeff), ) for block in BASE_EFFICIENTNET_BLOCKS_CONFIG ] return config class SqueezeAndExcitation(nn.Module): def __init__(self, in_channels, squeeze, activation): super(SqueezeAndExcitation, self).__init__() self.squeeze = nn.Linear(in_channels, squeeze) self.expand = nn.Linear(squeeze, in_channels) self.activation = activation self.sigmoid = nn.Sigmoid() def forward(self, x): out = torch.mean(x.view(x.size(0), x.size(1), -1), 2) out = self.squeeze(out) out = self.activation(out) out = self.expand(out) out = self.sigmoid(out) out = out.unsqueeze(2).unsqueeze(3) return out # Since torch.nn.SiLU is not supported in ONNX, # it is required to use this implementation in 
exported model (15-20% more GPU memory is needed) class MemoryInefficientSiLU(nn.Module): def __init__(self, *args, **kwargs): super(MemoryInefficientSiLU, self).__init__() def forward(self, x): return x * torch.sigmoid(x) class ConvBN(nn.Sequential): def __init__(self, kernel_size, stride, in_channels, out_channels, activation, bn_momentum, bn_epsilon, groups=1): layers = [ nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, groups=groups, bias=False, padding=int((kernel_size - 1) / 2)), nn.BatchNorm2d(out_channels, momentum=bn_momentum, eps=bn_epsilon), ] if activation is not None: layers.append(activation) super(ConvBN, self).__init__(*layers) class MBConvBlock(nn.Module): def __init__(self, block_config, global_config, survival_prob, activation): super(MBConvBlock, self).__init__() self.in_channels = block_config.in_channels self.out_channels = block_config.out_channels self.hidden_dim = self.in_channels * block_config.expand_ratio self.squeeze_dim = max(1, int(self.in_channels * global_config.squeeze_excitation_ratio)) self.kernel_size = block_config.kernel_size self.stride = block_config.stride self.stochastic_depth_survival_prob = survival_prob bn_momentum = global_config.batchnorm_momentum bn_epsilon = global_config.batchnorm_epsilon if self.in_channels != self.hidden_dim: self.expand_conv = ConvBN(1, 1, self.in_channels, self.hidden_dim, activation(), bn_momentum=bn_momentum, bn_epsilon=bn_epsilon) self.squeeze_and_excitation = SqueezeAndExcitation(self.hidden_dim, self.squeeze_dim, activation()) self.depthwise_conv = ConvBN(self.kernel_size, self.stride, self.hidden_dim, self.hidden_dim, activation(), groups=self.hidden_dim, bn_momentum=bn_momentum, bn_epsilon=bn_epsilon) self.project_conv = ConvBN(1, 1, self.hidden_dim, self.out_channels, activation=None, bn_momentum=bn_momentum, bn_epsilon=bn_epsilon) def _drop_connections(self, x, synchronized=False): if not self.training: return x random_mask = torch.rand([x.shape[0], 1, 1, 1], dtype=x.dtype, device=x.device) if synchronized: torch.distributed.broadcast(random_mask, 0) random_mask = (self.stochastic_depth_survival_prob + random_mask).floor() scaled_x = x / self.stochastic_depth_survival_prob return scaled_x * random_mask def forward(self, inputs): x = inputs if self.in_channels != self.hidden_dim: x = self.expand_conv(x) x = self.depthwise_conv(x) x = x * self.squeeze_and_excitation(x) x = self.project_conv(x) if self.stride == 1 and self.in_channels == self.out_channels: if self.stochastic_depth_survival_prob != 1: x = self._drop_connections(x) x = x + inputs return x class EfficientNet(nn.Module): def __init__(self, width_coeff, depth_coeff, dropout, num_classes, global_config, features_only=True, out_indices=None, onnx_exportable=False): super(EfficientNet, self).__init__() self.features_only = features_only self.efficientnet_blocks_config = scaled_efficientnet_config(width_coeff, depth_coeff) self.global_config = global_config self.in_channels = 3 self.feature_channels = _scale_width(self.global_config.feature_channels, width_coeff) self.activation = torch.nn.SiLU if not onnx_exportable else MemoryInefficientSiLU self.input_conv = ConvBN(3, 2, self.in_channels, self.efficientnet_blocks_config[0].in_channels, activation=self.activation(), bn_momentum=self.global_config.batchnorm_momentum, bn_epsilon=self.global_config.batchnorm_epsilon) self.feature_info = [] self.mbconv_blocks = nn.Sequential(*self.mbconv_blocks_generator()) if not self.features_only: self.features_conv = ConvBN(1, 1, 
self.efficientnet_blocks_config[-1].out_channels, self.feature_channels, activation=self.activation(), bn_momentum=self.global_config.batchnorm_momentum, bn_epsilon=self.global_config.batchnorm_epsilon) self.avg_pooling = nn.AdaptiveAvgPool2d(1) self.dropout = nn.Dropout(dropout) self.fc = nn.Linear(self.feature_channels, num_classes) if out_indices is not None: self.feature_info = [v for i, v in enumerate(self.feature_info) if i in out_indices] def mbconv_blocks_generator(self): num_blocks = sum([block_config.num_repeat for block_config in self.efficientnet_blocks_config]) drop_rate = 1.0 - self.global_config.stochastic_depth_survival_prob idx = 0 current_stride = 2 prev_block_config = None for config_idx, block_config in enumerate(self.efficientnet_blocks_config): for i in range(block_config.num_repeat): # Conditions for feature extraction if config_idx == len(self.efficientnet_blocks_config)-1 and i == block_config.num_repeat-1: self.feature_info.append(dict(block_idx=idx, reduction=current_stride, num_chs=block_config.out_channels)) elif prev_block_config is not None and block_config.stride > 1: self.feature_info.append(dict(block_idx=idx-1, reduction=current_stride, num_chs=prev_block_config.out_channels)) # Calculating the current stride if block_config.stride > 1: current_stride = current_stride * block_config.stride survival_prob = 1.0 - drop_rate * float(idx) / num_blocks yield MBConvBlock(block_config, self.global_config, survival_prob=survival_prob, activation=self.activation) idx += 1 prev_block_config = block_config block_config = block_config._replace(in_channels=block_config.out_channels, stride=1) def forward(self, inputs): x = inputs x = self.input_conv(x) features = [] extraction_idx = 0 for i, b in enumerate(self.mbconv_blocks): x = b(x) if i == self.feature_info[extraction_idx]['block_idx']: features.append(x) extraction_idx += 1 return x, features
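A small smoke test of the backbone defined above might look like the following; the coefficients correspond to the baseline (B0-like) scaling of width 1.0 and depth 1.0, and the 512x512 input size is an arbitrary example.

# Illustrative sketch: build the backbone with the "fanin" global config
# defined above and run a dummy forward pass.
import torch

model = EfficientNet(
    width_coeff=1.0,
    depth_coeff=1.0,
    dropout=0.2,
    num_classes=1000,
    global_config=efficientnet_configs["fanin"],
    features_only=True,
)
model.eval()
with torch.no_grad():
    x, features = model(torch.randn(1, 3, 512, 512))
print([f.shape for f in features])   # multi-scale feature maps fed to the detection neck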
Tools/PyTorch/TimeSeriesPredictionPlatform/triton/deployment_toolkit
deployment_toolkit
__init__
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
TensorFlow2/Recommendation/WideAndDeep/triton/runner
runner
config
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pathlib from typing import Dict, List, Optional, Union import yaml from ..deployment_toolkit.core import PerformanceTool if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .configuration import Configuration from .core import DataObject from .triton import Triton class Checkpoint(DataObject): """ Checkpoint data placeholder """ name: str url: str def __init__(self, name: str, url: str): self.name = name self.url = url class Dataset(DataObject): """ Dataset data placeholder """ name: str def __init__(self, name: str): self.name = name class Config(DataObject): """ Configuration object for runner experiments """ def __init__( self, model_name: str, framework: str, container_version: str, batching: str, configurations: List[Configuration], ensemble_model_name: Optional[str] = None, datasets_dir: str = "datasets", datasets: List[Dataset] = None, checkpoints: List[Checkpoint] = None, triton_dockerfile: Optional[str] = None, triton_container_image: Optional[str] = None, triton_custom_operations: Optional[str] = None, triton_load_model_method: Optional[str] = Triton.LOAD_MODE.EXPLICIT, measurement_steps_offline: int = 8, measurement_steps_online: int = 32, performance_tool: PerformanceTool = PerformanceTool.MODEL_ANALYZER, ): """ Args: model_name: Name of model framework: Framework used to create model container_version: Version of Triton Inference Server container used for evaluation batching: Mark if model support batching configurations: List of experiments configurations datasets_dir: Directory where datasets are stored datasets: Datasets used for conversion/export checkpoints: Checkpoints with trained model triton_load_model_method: Triton Inference Server model loading mode triton_dockerfile: Dockerfile for Triton to build custom image triton_container_image: Custom image used for Triton Server - leave empty to use default or built from Dockerfile triton_custom_operations: Path where custom operation library is stored measurement_steps_offline: Number of measurement steps in offline performance stage measurement_steps_online: Number of measurement steps in online performance stage performance_tool: Performance Tool used for generating results """ self.model_name = model_name self.ensemble_model_name = ensemble_model_name self.framework = framework self.container_version = container_version self.batching = batching self.configurations = configurations self.datasets_dir = datasets_dir self.datasets = datasets self.checkpoints = checkpoints self.triton_load_model_method = triton_load_model_method self.triton_dockerfile = triton_dockerfile self.triton_container_image = triton_container_image self.triton_custom_operations = triton_custom_operations self.measurement_steps_offline = measurement_steps_offline self.measurement_steps_online = measurement_steps_online self.performance_tool = performance_tool def to_file(self, file_path: 
Union[pathlib.Path, str]) -> None: """ Save config data to file Args: file_path: path to file where config data is should be stored Returns: None """ data = self.to_dict() with open(file_path, "w") as f: yaml.safe_dump(data, f) @staticmethod def from_dict(config_data: Dict): """ Create configuration object from data stored in dictionary Args: config_data: dictionary with config data Returns: Config object """ configurations = [] for configuration_data in config_data["configurations"]: configuration = Configuration(**configuration_data) configurations.append(configuration) checkpoints = [] for checkpoint_data in config_data.get("checkpoints", []): checkpoint = Checkpoint( name=checkpoint_data["name"], url=checkpoint_data["url"], ) checkpoints.append(checkpoint) datasets = [] for dataset_data in config_data.get("datasets", []): dataset = Dataset(name=dataset_data["name"]) datasets.append(dataset) return Config( model_name=config_data["model_name"], framework=config_data["framework"], container_version=config_data["container_version"], batching=config_data["batching"], configurations=configurations, checkpoints=checkpoints, datasets=datasets, datasets_dir=config_data.get("datasets_dir"), triton_load_model_method=config_data["triton_load_model_method"], triton_dockerfile=config_data.get("triton_dockerfile"), triton_custom_operations=config_data.get("triton_custom_operations"), measurement_steps_offline=config_data["measurement_steps_offline"], measurement_steps_online=config_data["measurement_steps_online"], performance_tool=PerformanceTool(config_data["performance_tool"]), ) @staticmethod def from_file(file_path: Union[pathlib.Path, str]): """ Load experiment data from file Args: file_path: path to file where experiment data is stored Returns: Experiment object """ with open(file_path) as f: config_data = yaml.safe_load(f) return Config.from_dict(config_data)
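To show how the Config object above is normally consumed, a hedged sketch follows; the YAML file name is hypothetical, and the round trip assumes DataObject.to_dict (defined elsewhere in the toolkit) serializes the attributes set in __init__.

# Assumed usage sketch: round-trip a runner configuration through YAML.
# "runner_config.yaml" is a hypothetical file containing the keys consumed
# by Config.from_dict above.
import pathlib

config = Config.from_file(pathlib.Path("runner_config.yaml"))
print(config.model_name, config.container_version, config.performance_tool)

# Persist it again (relies on DataObject.to_dict, not shown here):
config.to_file("runner_config_copy.yaml")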
TensorFlow2/Detection/Efficientdet/utils
utils
horovod_utils
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import horovod.tensorflow.keras as hvd def get_rank(): try: return hvd.rank() except: return 0 def get_world_size(): try: return hvd.size() except: return 1 def is_main_process(): return get_rank() == 0
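These helpers are typically used to restrict logging and checkpointing to a single worker. A minimal sketch is shown below; it assumes hvd.init() is called by the training script before the helpers are queried.

# Minimal sketch: gate rank-0-only work behind the helpers above.
import horovod.tensorflow.keras as hvd

hvd.init()
if is_main_process():
    print(f"Running on {get_world_size()} workers; this is rank {get_rank()}")
    # e.g. write checkpoints and logs only from the main process here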
PyTorch/Segmentation/nnUNet
nnUNet
preprocess
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from data_preprocessing.preprocessor import Preprocessor from utils.utils import get_task_code parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("--data", type=str, default="/data", help="Path to data directory") parser.add_argument("--results", type=str, default="/data", help="Path for saving results directory") parser.add_argument( "--exec_mode", type=str, default="training", choices=["training", "val", "test"], help="Mode for data preprocessing", ) parser.add_argument("--ohe", action="store_true", help="Add one-hot-encoding for foreground voxels (voxels > 0)") parser.add_argument("--verbose", action="store_true") parser.add_argument("--task", type=str, help="Number of task to be run. MSD uses numbers 01-10") parser.add_argument("--dim", type=int, default=3, choices=[2, 3], help="Data dimension to prepare") parser.add_argument("--n_jobs", type=int, default=-1, help="Number of parallel jobs for data preprocessing") if __name__ == "__main__": args = parser.parse_args() start = time.time() Preprocessor(args).run() task_code = get_task_code(args) path = os.path.join(args.data, task_code) if args.exec_mode == "test": path = os.path.join(path, "test") end = time.time() print(f"Pre-processing time: {(end - start):.2f}")
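Because the script exposes everything through argparse, its interface can be exercised without running any preprocessing; the sketch below parses an example argument list (the task number and flags are placeholders).

# Illustrative sketch: exercise the argument parser defined above with a
# placeholder MSD task; no preprocessing is run here.
args = parser.parse_args(["--task", "01", "--dim", "3", "--exec_mode", "training", "--ohe"])
print(args.task, args.dim, args.exec_mode, args.ohe, args.n_jobs)
# Expected output: 01 3 training True -1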
TensorFlow/Classification/ConvNets/triton/scripts
scripts
process_dataset
#!/usr/bin/env bash # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if [ -d "${DATASETS_DIR}/imagenet" ]; then echo "Dataset already downloaded and processed." else python triton/process_dataset.py fi
TensorFlow/Detection/SSD/models/research/object_detection/models
models
ssd_mobilenet_v1_feature_extractor_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for ssd_mobilenet_v1_feature_extractor.""" import numpy as np import tensorflow as tf from object_detection.models import ssd_feature_extractor_test from object_detection.models import ssd_mobilenet_v1_feature_extractor slim = tf.contrib.slim class SsdMobilenetV1FeatureExtractorTest( ssd_feature_extractor_test.SsdFeatureExtractorTestBase): def _create_feature_extractor(self, depth_multiplier, pad_to_multiple, is_training=True, use_explicit_padding=False): """Constructs a new feature extractor. Args: depth_multiplier: float depth multiplier for feature extractor pad_to_multiple: the nearest multiple to zero pad the input height and width dimensions to. is_training: whether the network is in training mode. use_explicit_padding: Use 'VALID' padding for convolutions, but prepad inputs so that the output dimensions are the same as if 'SAME' padding were used. Returns: an ssd_meta_arch.SSDFeatureExtractor object. """ min_depth = 32 return ssd_mobilenet_v1_feature_extractor.SSDMobileNetV1FeatureExtractor( is_training, depth_multiplier, min_depth, pad_to_multiple, self.conv_hyperparams_fn, use_explicit_padding=use_explicit_padding) def test_extract_features_returns_correct_shapes_128(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_returns_correct_shapes_299(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 512), (2, 10, 10, 1024), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_with_dynamic_image_shape(self): image_height = 128 image_width = 128 depth_multiplier = 1.0 pad_to_multiple = 1 expected_feature_map_shape = [(2, 8, 8, 512), (2, 4, 4, 1024), (2, 2, 2, 512), (2, 1, 1, 256), (2, 1, 1, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shapes_with_dynamic_inputs( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) 
self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_returns_correct_shapes_enforcing_min_depth(self): image_height = 299 image_width = 299 depth_multiplier = 0.5**12 pad_to_multiple = 1 expected_feature_map_shape = [(2, 19, 19, 32), (2, 10, 10, 32), (2, 5, 5, 32), (2, 3, 3, 32), (2, 2, 2, 32), (2, 1, 1, 32)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_returns_correct_shapes_with_pad_to_multiple(self): image_height = 299 image_width = 299 depth_multiplier = 1.0 pad_to_multiple = 32 expected_feature_map_shape = [(2, 20, 20, 512), (2, 10, 10, 1024), (2, 5, 5, 512), (2, 3, 3, 256), (2, 2, 2, 256), (2, 1, 1, 128)] self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=False) self.check_extract_features_returns_correct_shape( 2, image_height, image_width, depth_multiplier, pad_to_multiple, expected_feature_map_shape, use_explicit_padding=True) def test_extract_features_raises_error_with_invalid_image_size(self): image_height = 32 image_width = 32 depth_multiplier = 1.0 pad_to_multiple = 1 self.check_extract_features_raises_error_with_invalid_image_size( image_height, image_width, depth_multiplier, pad_to_multiple) def test_preprocess_returns_correct_value_range(self): image_height = 128 image_width = 128 depth_multiplier = 1 pad_to_multiple = 1 test_image = np.random.rand(2, image_height, image_width, 3) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(test_image) self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0))) def test_variables_only_created_in_scope(self): depth_multiplier = 1 pad_to_multiple = 1 scope_name = 'MobilenetV1' self.check_feature_extractor_variables_under_scope( depth_multiplier, pad_to_multiple, scope_name) def test_has_fused_batchnorm(self): image_height = 40 image_width = 40 depth_multiplier = 1 pad_to_multiple = 1 image_placeholder = tf.placeholder(tf.float32, [1, image_height, image_width, 3]) feature_extractor = self._create_feature_extractor(depth_multiplier, pad_to_multiple) preprocessed_image = feature_extractor.preprocess(image_placeholder) _ = feature_extractor.extract_features(preprocessed_image) self.assertTrue(any(op.type == 'FusedBatchNorm' for op in tf.get_default_graph().get_operations())) if __name__ == '__main__': tf.test.main()
PyTorch/SpeechRecognition/QuartzNet
QuartzNet
inference
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import math import os import random import time from heapq import nlargest from itertools import chain, repeat from pathlib import Path from tqdm import tqdm import dllogger import torch import numpy as np import torch.distributed as distrib from dllogger import JSONStreamBackend, StdOutBackend, Verbosity from quartznet import config from common import helpers from common.dali.data_loader import DaliDataLoader from common.dataset import (AudioDataset, FilelistDataset, get_data_loader, SingleAudioDataset) from common.features import BaseFeatures, FilterbankFeatures from common.helpers import print_once, process_evaluation_epoch from common.tb_dllogger import stdout_metric_format, unique_log_fpath from nemo_dle_model_converter import load_nemo_ckpt from quartznet.model import GreedyCTCDecoder, QuartzNet def get_parser(): parser = argparse.ArgumentParser(description='QuartzNet inference') parser.add_argument('--batch_size', default=16, type=int, help='Data batch size') parser.add_argument('--steps', default=0, type=int, help='Eval this many steps for every worker') parser.add_argument('--warmup_steps', default=0, type=int, help='Burn-in period before measuring latencies') parser.add_argument('--model_config', type=str, required=True, help='Relative model config path given dataset folder') parser.add_argument('--dataset_dir', type=str, help='Absolute path to dataset folder') parser.add_argument('--val_manifests', type=str, nargs='+', help='Relative path to evaluation dataset manifest files') parser.add_argument('--ckpt', default=None, type=str, help='Path to model checkpoint') parser.add_argument('--amp', '--fp16', action='store_true', help='Use FP16 precision') parser.add_argument('--cudnn_benchmark', action='store_true', help='Enable cudnn benchmark') parser.add_argument('--cpu', action='store_true', help='Run inference on CPU') parser.add_argument("--seed", default=None, type=int, help='Random seed') parser.add_argument('--local_rank', default=os.getenv('LOCAL_RANK', 0), type=int, help='GPU id used for distributed training') io = parser.add_argument_group('feature and checkpointing setup') io.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'], default='gpu', help='Use DALI pipeline for fast data processing') io.add_argument('--save_predictions', type=str, default=None, help='Save predictions in text form at this location') io.add_argument('--save_logits', default=None, type=str, help='Save output logits under specified path') io.add_argument('--transcribe_wav', type=str, help='Path to a single .wav file (16KHz)') io.add_argument('--transcribe_filelist', type=str, help='Path to a filelist with one .wav path per line') io.add_argument('-o', '--output_dir', default='results/', help='Output folder to save audio (file per phrase)') io.add_argument('--log_file', type=str, default=None, help='Path to a DLLogger log file') io.add_argument('--ema', action='store_true', 
help='Load averaged model weights') io.add_argument('--torchscript', action='store_true', help='Evaluate with a TorchScripted model') io.add_argument('--torchscript_export', action='store_true', help='Export the model with torch.jit to the output_dir') io.add_argument('--override_config', type=str, action='append', help='Overrides arbitrary config value.' ' Syntax: `--override_config nested.config.key=val`.') return parser def durs_to_percentiles(durations, ratios): durations = np.asarray(durations) * 1000 # in ms latency = durations latency = latency[5:] mean_latency = np.mean(latency) latency_worst = nlargest(math.ceil((1 - min(ratios)) * len(latency)), latency) latency_ranges = get_percentile(ratios, latency_worst, len(latency)) latency_ranges[0.5] = mean_latency return latency_ranges def get_percentile(ratios, arr, nsamples): res = {} for a in ratios: idx = max(int(nsamples * (1 - a)), 0) res[a] = arr[idx] return res def torchscript_export(data_loader, audio_processor, model, greedy_decoder, output_dir, use_amp, use_conv_masks, model_config, device, save): audio_processor.to(device) for batch in data_loader: batch = [t.to(device, non_blocking=True) for t in batch] audio, audio_len, _, _ = batch feats, feat_lens = audio_processor(audio, audio_len) break print("\nExporting featurizer...") print("\nNOTE: Dithering causes warnings about non-determinism.\n") ts_feat = torch.jit.trace(audio_processor, (audio, audio_len)) print("\nExporting acoustic model...") model(feats, feat_lens) ts_acoustic = torch.jit.trace(model, (feats, feat_lens)) print("\nExporting decoder...") log_probs = model(feats, feat_lens) ts_decoder = torch.jit.script(greedy_decoder, log_probs) print("\nJIT export complete.") if save: precision = "fp16" if use_amp else "fp32" module_name = f'{os.path.basename(model_config)}_{precision}' ts_feat.save(os.path.join(output_dir, module_name + "_feat.pt")) ts_acoustic.save(os.path.join(output_dir, module_name + "_acoustic.pt")) ts_decoder.save(os.path.join(output_dir, module_name + "_decoder.pt")) return ts_feat, ts_acoustic, ts_decoder def main(): parser = get_parser() args = parser.parse_args() log_fpath = args.log_file or str(Path(args.output_dir, 'nvlog_infer.json')) dllogger.init(backends=[ JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True), JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)), StdOutBackend(Verbosity.VERBOSE, metric_format=stdout_metric_format) ]) [dllogger.log("PARAMETER", {k: v}) for k, v in vars(args).items()] for step in ['DNN', 'data+DNN', 'data']: for c in [0.99, 0.95, 0.9, 0.5]: cs = 'avg' if c == 0.5 else f'{int(100*c)}%' dllogger.metadata(f'{step.lower()}_latency_{c}', {'name': f'{step} latency {cs}', 'format': ':>7.2f', 'unit': 'ms'}) dllogger.metadata( 'eval_wer', {'name': 'WER', 'format': ':>3.2f', 'unit': '%'}) if args.cpu: device = torch.device('cpu') else: assert torch.cuda.is_available() device = torch.device('cuda') torch.backends.cudnn.benchmark = args.cudnn_benchmark if args.seed is not None: torch.manual_seed(args.seed + args.local_rank) np.random.seed(args.seed + args.local_rank) random.seed(args.seed + args.local_rank) # set up distributed training multi_gpu = not args.cpu and int(os.environ.get('WORLD_SIZE', 1)) > 1 if multi_gpu: torch.cuda.set_device(args.local_rank) distrib.init_process_group(backend='nccl', init_method='env://') print_once(f'Inference with {distrib.get_world_size()} GPUs') if args.ckpt is not None: print(f'Loading the model from {args.ckpt} ...') print(f'{args.model_config} will be 
overridden.') if args.ckpt.lower().endswith('.nemo'): ckpt, cfg = load_nemo_ckpt(args.ckpt) else: cfg = config.load(args.model_config) ckpt = torch.load(args.ckpt, map_location='cpu') sd_key = 'ema_state_dict' if args.ema else 'state_dict' if args.ema and 'ema_state_dict' not in ckpt: print(f'WARNING: EMA weights are unavailable in {args.ckpt}.') sd_key = 'state_dict' state_dict = ckpt[sd_key] else: cfg = config.load(args.model_config) state_dict = None config.apply_config_overrides(cfg, args) symbols = helpers.add_ctc_blank(cfg['labels']) use_dali = args.dali_device in ('cpu', 'gpu') dataset_kw, features_kw = config.input(cfg, 'val') measure_perf = args.steps > 0 # dataset if args.transcribe_wav or args.transcribe_filelist: if use_dali: print("DALI supported only with input .json files; disabling") use_dali = False assert not cfg['input_val']['audio_dataset'].get('pad_to_max_duration', False) assert not (args.transcribe_wav and args.transcribe_filelist) if args.transcribe_wav: dataset = SingleAudioDataset(args.transcribe_wav) else: dataset = FilelistDataset(args.transcribe_filelist) data_loader = get_data_loader(dataset, batch_size=1, multi_gpu=multi_gpu, shuffle=False, num_workers=0, drop_last=(True if measure_perf else False)) _, features_kw = config.input(cfg, 'val') feat_proc = FilterbankFeatures(**features_kw) elif use_dali: # pad_to_max_duration is not supported by DALI - have simple padders if features_kw['pad_to_max_duration']: feat_proc = BaseFeatures( pad_align=features_kw['pad_align'], pad_to_max_duration=True, max_duration=features_kw['max_duration'], sample_rate=features_kw['sample_rate'], window_size=features_kw['window_size'], window_stride=features_kw['window_stride']) features_kw['pad_to_max_duration'] = False else: feat_proc = None data_loader = DaliDataLoader( gpu_id=args.local_rank or 0, dataset_path=args.dataset_dir, config_data=dataset_kw, config_features=features_kw, json_names=args.val_manifests, batch_size=args.batch_size, pipeline_type=("train" if measure_perf else "val"), # no drop_last device_type=args.dali_device, symbols=symbols) else: dataset = AudioDataset(args.dataset_dir, args.val_manifests, symbols, **dataset_kw) data_loader = get_data_loader(dataset, args.batch_size, multi_gpu=multi_gpu, shuffle=False, num_workers=4, drop_last=False) feat_proc = FilterbankFeatures(**features_kw) model = QuartzNet(encoder_kw=config.encoder(cfg), decoder_kw=config.decoder(cfg, n_classes=len(symbols))) if state_dict is not None: model.load_state_dict(state_dict, strict=True) model.to(device) model.eval() if feat_proc is not None: feat_proc.to(device) feat_proc.eval() if args.amp: model = model.half() if args.torchscript: greedy_decoder = GreedyCTCDecoder() feat_proc, model, greedy_decoder = torchscript_export( data_loader, feat_proc, model, greedy_decoder, args.output_dir, use_amp=args.amp, use_conv_masks=True, model_config=args.model_config, device=device, save=args.torchscript_export) if multi_gpu: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank) agg = {'txts': [], 'preds': [], 'logits': []} dur = {'data': [], 'dnn': [], 'data+dnn': []} looped_loader = chain.from_iterable(repeat(data_loader)) greedy_decoder = GreedyCTCDecoder() sync = lambda: torch.cuda.synchronize() if device.type == 'cuda' else None steps = args.steps + args.warmup_steps or len(data_loader) with torch.no_grad(): for it, batch in enumerate(tqdm(looped_loader, initial=1, total=steps)): if use_dali: feats, feat_lens, txt, txt_lens = batch if 
feat_proc is not None: feats, feat_lens = feat_proc(feats, feat_lens) else: batch = [t.to(device, non_blocking=True) for t in batch] audio, audio_lens, txt, txt_lens = batch feats, feat_lens = feat_proc(audio, audio_lens) sync() t1 = time.time() if args.amp: feats = feats.half() if model.encoder.use_conv_masks: log_probs, log_prob_lens = model(feats, feat_lens) else: log_probs = model(feats, feat_lens) preds = greedy_decoder(log_probs) sync() t2 = time.time() # burn-in period; wait for a new loader due to num_workers if it >= 1 and (args.steps == 0 or it >= args.warmup_steps): dur['data'].append(t1 - t0) dur['dnn'].append(t2 - t1) dur['data+dnn'].append(t2 - t0) if txt is not None: agg['txts'] += helpers.gather_transcripts([txt], [txt_lens], symbols) agg['preds'] += helpers.gather_predictions([preds], symbols) agg['logits'].append(log_probs) if it + 1 == steps: break sync() t0 = time.time() # communicate the results if args.transcribe_wav: for idx, p in enumerate(agg['preds']): print_once(f'Prediction {idx+1: >3}: {p}') elif args.transcribe_filelist: pass elif not multi_gpu or distrib.get_rank() == 0: wer, _ = process_evaluation_epoch(agg) dllogger.log(step=(), data={'eval_wer': 100 * wer}) if args.save_predictions: with open(args.save_predictions, 'w') as f: f.write('\n'.join(agg['preds'])) if args.save_logits: logits = torch.cat(agg['logits'], dim=0).cpu() torch.save(logits, args.save_logits) # report timings if len(dur['data']) >= 20: ratios = [0.9, 0.95, 0.99] for stage in dur: lat = durs_to_percentiles(dur[stage], ratios) for k in [0.99, 0.95, 0.9, 0.5]: kk = str(k).replace('.', '_') dllogger.log(step=(), data={f'{stage.lower()}_latency_{kk}': lat[k]}) else: print_once('Not enough samples to measure latencies.') if __name__ == "__main__": main()
TensorFlow2/LanguageModeling/BERT
BERT
dllogger_class
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from dllogger import Logger, StdOutBackend, JSONStreamBackend, Verbosity import numpy class dllogger_class(): def format_step(self, step): if isinstance(step, str): return step elif isinstance(step, int): return "Iteration: {} ".format(step) elif len(step) > 0: return "Iteration: {} ".format(step[0]) else: return "" def __init__(self, log_path="bert_dllog.json"): self.logger = Logger([ StdOutBackend(Verbosity.DEFAULT, step_format=self.format_step), JSONStreamBackend(Verbosity.VERBOSE, log_path), ]) self.logger.metadata("mlm_loss", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "TRAIN"}) self.logger.metadata("nsp_loss", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "TRAIN"}) self.logger.metadata("avg_loss_step", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "TRAIN"}) self.logger.metadata("total_loss", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "TRAIN"}) self.logger.metadata("loss", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "TRAIN"}) self.logger.metadata("f1", {"unit": None, "format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "VAL"}) self.logger.metadata("precision", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "VAL"}) self.logger.metadata("recall", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "VAL"}) self.logger.metadata("mcc", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "VAL"}) self.logger.metadata("exact_match", {"format": ":.4f", "GOAL": "MINIMIZE", "STAGE": "VAL"}) self.logger.metadata( "throughput_train", {"unit": "sequences/s", "format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": "TRAIN"}, ) self.logger.metadata( "throughput_inf", {"unit": "sequences/s", "format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": "VAL"}, ) self.logger.metadata( "throughput_val", {"unit": "sequences/s", "format": ":.3f", "GOAL": "MAXIMIZE", "STAGE": "VAL"}, )
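A short, hypothetical usage sketch for the wrapper above (not shipped with the file; it assumes the `dllogger` package is installed and uses only metric names registered in `__init__`):

```python
# Hypothetical example of using the dllogger_class wrapper defined above.
logger = dllogger_class(log_path="bert_dllog.json")

# format_step() accepts an int, a string, or a tuple for the step argument.
logger.logger.log(step=100, data={"total_loss": 2.3105, "throughput_train": 512.0})
logger.logger.log(step="final", data={"f1": 0.9134, "exact_match": 0.8421})
logger.logger.flush()
```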
TensorFlow/Classification/ConvNets/resnext101-32x4d/training
training
DGX1_RNxt101-32x4d_AMP_90E
#!/bin/bash # Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. WORKSPACE=${1:-"/workspace/rn50v15_tf"} DATA_DIR=${2:-"/data"} OTHER=${@:3} if [[ ! -z "${BIND_TO_SOCKET}" ]]; then BIND_TO_SOCKET="--bind-to socket" fi mpiexec --allow-run-as-root ${BIND_TO_SOCKET} -np 8 python3 main.py --arch=resnext101-32x4d \ --mode=train_and_evaluate --iter_unit=epoch --num_iter=90 \ --batch_size=128 --warmup_steps=100 --cosine_lr --label_smoothing 0.1 \ --lr_init=0.256 --lr_warmup_epochs=8 --momentum=0.875 --weight_decay=6.103515625e-05 \ --amp --static_loss_scale 128 \ --data_dir=${DATA_DIR}/tfrecords --data_idx_dir=${DATA_DIR}/dali_idx \ --results_dir=${WORKSPACE}/results --weight_init=fan_in ${OTHER}
Tools/DGLPyTorch/SyntheticGraphGeneration/syngen/generator/tabular/data_transformer
data_transformer
__init__
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
PyTorch/Classification/ConvNets/resnet50v1.5
resnet50v1.5
README
# ResNet50 v1.5 For PyTorch This repository provides a script and recipe to train the ResNet50 model to achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA. ## Table Of Contents * [Model overview](#model-overview) * [Default configuration](#default-configuration) * [Optimizer](#optimizer) * [Data augmentation](#data-augmentation) * [DALI](#dali) * [Feature support matrix](#feature-support-matrix) * [Features](#features) * [Mixed precision training](#mixed-precision-training) * [Enabling mixed precision](#enabling-mixed-precision) * [Enabling TF32](#enabling-tf32) * [Setup](#setup) * [Requirements](#requirements) * [Quick Start Guide](#quick-start-guide) * [Advanced](#advanced) * [Scripts and sample code](#scripts-and-sample-code) * [Command-line options](#command-line-options) * [Dataset guidelines](#dataset-guidelines) * [Training process](#training-process) * [Inference process](#inference-process) * [Performance](#performance) * [Benchmarking](#benchmarking) * [Training performance benchmark](#training-performance-benchmark) * [Inference performance benchmark](#inference-performance-benchmark) * [Results](#results) * [Training accuracy results](#training-accuracy-results) * [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb) * [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb) * [Training accuracy: NVIDIA DGX-2 (16x V100 32GB)](#training-accuracy-nvidia-dgx-2-16x-v100-32gb) * [Example plots](#example-plots) * [Training performance results](#training-performance-results) * [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb) * [Training performance: NVIDIA DGX-1 16GB (8x V100 16GB)](#training-performance-nvidia-dgx-1-16gb-8x-v100-16gb) * [Training performance: NVIDIA DGX-1 32GB (8x V100 32GB)](#training-performance-nvidia-dgx-1-32gb-8x-v100-32gb) * [Inference performance results](#inference-performance-results) * [Inference performance: NVIDIA DGX-1 16GB (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb) * [Inference performance: NVIDIA T4](#inference-performance-nvidia-t4) * [Release notes](#release-notes) * [Changelog](#changelog) * [Known issues](#known-issues) ## Model overview The ResNet50 v1.5 model is a modified version of the [original ResNet50 v1 model](https://arxiv.org/abs/1512.03385). The difference between v1 and v1.5 is that, in the bottleneck blocks that require downsampling, v1 has stride = 2 in the first 1x1 convolution, whereas v1.5 has stride = 2 in the 3x3 convolution. This difference makes ResNet50 v1.5 slightly more accurate (~0.5% top1) than v1, but comes with a small performance drawback (~5% imgs/sec). The model is initialized as described in [Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification](https://arxiv.org/pdf/1502.01852.pdf). This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results over 2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time. We are currently working on adding [NHWC data layout](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) support for Mixed Precision training.
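To make the stride placement concrete, below is a simplified sketch of a v1.5-style bottleneck block (an illustration in the spirit of the common torchvision layout, not the code used by this repository); in v1, the `stride` argument would move to `conv1` instead:

```python
# Simplified illustration of a ResNet v1.5 bottleneck block.
import torch.nn as nn

class BottleneckV15(nn.Module):
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        super().__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)  # stride 1 (v1 puts stride=2 here)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)                          # v1.5: stride=2 in the 3x3 conv
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample  # typically a strided 1x1 conv on the shortcut

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + identity)
```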
### Default configuration The following sections highlight the default configurations for the ResNet50 model. #### Optimizer This model uses SGD with momentum optimizer with the following hyperparameters: * Momentum (0.875) * Learning rate (LR) = 0.256 for 256 batch size, for other batch sizes we linearly scale the learning rate. * Learning rate schedule - we use cosine LR schedule * For bigger batch sizes (512 and up) we use linear warmup of the learning rate during the first couple of epochs according to [Training ImageNet in 1 hour](https://arxiv.org/abs/1706.02677). Warmup length depends on the total training length. * Weight decay (WD)= 3.0517578125e-05 (1/32768). * We do not apply WD on Batch Norm trainable parameters (gamma/bias) * Label smoothing = 0.1 * We train for: * 50 Epochs -> configuration that reaches 75.9% top1 accuracy * 90 Epochs -> 90 epochs is a standard for ImageNet networks * 250 Epochs -> best possible accuracy. * For 250 epoch training we also use [MixUp regularization](https://arxiv.org/pdf/1710.09412.pdf). #### Data augmentation This model uses the following data augmentation: * For training: * Normalization * Random resized crop to 224x224 * Scale from 8% to 100% * Aspect ratio from 3/4 to 4/3 * Random horizontal flip * For inference: * Normalization * Scale to 256x256 * Center crop to 224x224 #### Other training recipes This script does not target any specific benchmark. There are changes that others have made which can speed up convergence and/or increase accuracy. One of the more popular training recipes is provided by [fast.ai](https://github.com/fastai/imagenet-fast). The fast.ai recipe introduces many changes to the training procedure, one of which is progressive resizing of the training images. The first part of training uses 128px images, the middle part uses 224px images, and the last part uses 288px images. The final validation is performed on 288px images. Training script in this repository performs validation on 224px images, just like the original paper described. These two approaches can't be directly compared, since the fast.ai recipe requires validation on 288px images, and this recipe keeps the original assumption that validation is done on 224px images. Using 288px images means that a lot more FLOPs are needed during inference to reach the same accuracy. ### Feature support matrix The following features are supported by this model: | Feature | ResNet50 |-----------------------|-------------------------- |[DALI](https://docs.nvidia.com/deeplearning/sdk/dali-release-notes/index.html) | Yes |[APEX AMP](https://nvidia.github.io/apex/amp.html) | Yes | #### Features - NVIDIA DALI - DALI is a library accelerating data preparation pipeline. To accelerate your input pipeline, you only need to define your data loader with the DALI library. For more information about DALI, refer to the [DALI product documentation](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/index.html). - [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains utility libraries, such as [Automatic Mixed Precision (AMP)](https://nvidia.github.io/apex/amp.html), which require minimal network code changes to leverage Tensor Cores performance. Refer to the [Enabling mixed precision](#enabling-mixed-precision) section for more details. ### DALI We use [NVIDIA DALI](https://github.com/NVIDIA/DALI), which speeds up data loading when CPU becomes a bottleneck. DALI can use CPU or GPU, and outperforms the PyTorch native dataloader. 
Run training with `--data-backends dali-gpu` or `--data-backends dali-cpu` to enable DALI. For DGXA100 and DGX1 we recommend `--data-backends dali-cpu`, for DGX2 we recommend `--data-backends dali-gpu`. ### Mixed precision training Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps: 1. Porting the model to use the FP16 data type where appropriate. 2. Adding loss scaling to preserve small gradient values. The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in CUDA 8 in the NVIDIA Deep Learning SDK. For information about: - How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation. - Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog. - APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/). #### Enabling mixed precision Mixed precision is enabled in PyTorch by using the Automatic Mixed Precision (AMP), a library from [APEX](https://github.com/NVIDIA/apex) that casts variables to half-precision upon retrieval, while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients. In PyTorch, loss scaling can be easily applied by using scale_loss() method provided by AMP. The scaling value to be used can be [dynamic](https://nvidia.github.io/apex/fp16_utils.html#apex.fp16_utils.DynamicLossScaler) or fixed. For an in-depth walk through on AMP, check out sample usage [here](https://github.com/NVIDIA/apex/tree/master/apex/amp#usage-and-getting-started). [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains utility libraries, such as AMP, which require minimal network code changes to leverage tensor cores performance. To enable mixed precision, you can: - Import AMP from APEX: ```python from apex import amp ``` - Wrap model and optimizer in amp.initialize: ```python model, optimizer = amp.initialize(model, optimizer, opt_level="O1", loss_scale="dynamic") ``` - Scale loss before backpropagation: ```python with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() ``` #### Enabling TF32 TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling the matrix math also called tensor operations. 
TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs. TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations. For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post. TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default. ## Setup The following section lists the requirements that you need to meet in order to start training the ResNet50 model. ### Requirements This repository contains Dockerfile which extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components: * [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker) * [PyTorch 21.03-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch) or newer * Supported GPUs: * [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/) * [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/) * [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/) For more information about how to get started with NGC containers, see the following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning DGX Documentation: * [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html) * [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry) * [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running) For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html). ## Quick Start Guide ### 1. Clone the repository. ``` git clone https://github.com/NVIDIA/DeepLearningExamples cd DeepLearningExamples/PyTorch/Classification/ ``` ### 2. Download and preprocess the dataset. The ResNet50 script operates on ImageNet 1k, a widely popular image classification dataset from the ILSVRC challenge. PyTorch can work directly on JPEGs, therefore, preprocessing/augmentation is not needed. To train your model using mixed or TF32 precision with Tensor Cores or using FP32, perform the following steps using the default parameters of the resnet50 model on the ImageNet dataset. For the specifics concerning training and inference, see the [Advanced](#advanced) section. 1. [Download the images](http://image-net.org/download-images). 2. Extract the training data: ```bash mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train tar -xvf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done cd .. ``` 3. 
Extract the validation data and move the images to subfolders: ```bash mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash ``` The directory in which the `train/` and `val/` directories are placed is referred to as `<path to imagenet>` in this document. ### 3. Build the ResNet50 PyTorch NGC container. ``` docker build . -t nvidia_resnet50 ``` ### 4. Start an interactive session in the NGC container to run training/inference. ``` nvidia-docker run --rm -it -v <path to imagenet>:/imagenet --ipc=host nvidia_resnet50 ``` ### 5. Start training To run training for a standard configuration (DGXA100/DGX1V/DGX2V, AMP/TF32/FP32, 90/250 Epochs), run one of the scripts in the `./resnet50v1.5/training` directory called `./resnet50v1.5/training/{AMP, TF32, FP32}/{ DGXA100, DGX1V, DGX2V }_resnet50_{AMP, TF32, FP32}_{ 90, 250 }E.sh`. Ensure ImageNet is mounted in the `/imagenet` directory. Example: `bash ./resnet50v1.5/training/AMP/DGX1_resnet50_AMP_250E.sh <path where to store checkpoints and logs>` ### 6. Start inference You can download pretrained weights from NGC: ```bash wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/resnet50_pyt_amp/versions/20.06.0/zip -O resnet50_pyt_amp_20.06.0.zip unzip resnet50_pyt_amp_20.06.0.zip ``` To run inference on ImageNet, run: `python ./main.py --arch resnet50 --evaluate --epochs 1 --pretrained-from-file nvidia_resnet50_200821.pth.tar -b <batch size> <path to imagenet>` To run inference on a JPEG image using pretrained weights: `python classify.py --arch resnet50 --pretrained-from-file nvidia_resnet50_200821.pth.tar --precision AMP|FP32 --image <path to JPEG image>` ## Advanced The following sections provide greater details of the dataset, running training and inference, and the training results. ### Scripts and sample code To run a non-standard configuration, use: * For 1 GPU * FP32 `python ./main.py --arch resnet50 -c fanin --label-smoothing 0.1 <path to imagenet>` * AMP `python ./main.py --arch resnet50 -c fanin --label-smoothing 0.1 --amp --static-loss-scale 256 <path to imagenet>` * For multiple GPUs * FP32 `python ./multiproc.py --nproc_per_node 8 ./main.py --arch resnet50 -c fanin --label-smoothing 0.1 <path to imagenet>` * AMP `python ./multiproc.py --nproc_per_node 8 ./main.py --arch resnet50 -c fanin --label-smoothing 0.1 --amp --static-loss-scale 256 <path to imagenet>` Use `python ./main.py -h` to obtain the list of available options in the `main.py` script.
### Command-line options To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example: `python main.py -h` ``` usage: main.py [-h] [--data-backend BACKEND] [--arch ARCH] [--model-config CONF] [-j N] [--epochs N] [--run-epochs N] [-b N] [--optimizer-batch-size N] [--lr LR] [--lr-schedule SCHEDULE] [--warmup E] [--label-smoothing S] [--mixup ALPHA] [--momentum M] [--weight-decay W] [--bn-weight-decay] [--nesterov] [--print-freq N] [--resume PATH] [--pretrained-from-file PATH] [--static-loss-scale STATIC_LOSS_SCALE] [--dynamic-loss-scale] [--prof N] [--amp] [--seed SEED] [--gather-checkpoints] [--raport-file RAPORT_FILE] [--evaluate] [--training-only] [--no-checkpoints] [--checkpoint-filename CHECKPOINT_FILENAME] [--workspace DIR] [--memory-format {nchw,nhwc}] DIR PyTorch ImageNet Training positional arguments: DIR path to dataset optional arguments: -h, --help show this help message and exit --data-backend BACKEND data backend: pytorch | synthetic | dali-gpu | dali-cpu (default: dali-cpu) --arch ARCH, -a ARCH model architecture: resnet18 | resnet34 | resnet50 | resnet101 | resnet152 | resnext50-32x4d | resnext101-32x4d | resnext101-32x8d | resnext101-32x8d-basic | se-resnext101-32x4d (default: resnet50) --model-config CONF, -c CONF model configs: classic | fanin | grp-fanin | grp-fanout (default: classic) -j N, --workers N number of data loading workers (default: 5) --epochs N number of total epochs to run --run-epochs N run only N epochs, used for checkpointing runs -b N, --batch-size N mini-batch size (default: 256) per gpu --optimizer-batch-size N size of a total batch size, for simulating bigger batches using gradient accumulation --lr LR, --learning-rate LR initial learning rate --lr-schedule SCHEDULE Type of LR schedule: step, linear, cosine --warmup E number of warmup epochs --label-smoothing S label smoothing --mixup ALPHA mixup alpha --momentum M momentum --weight-decay W, --wd W weight decay (default: 1e-4) --bn-weight-decay use weight_decay on batch normalization learnable parameters, (default: false) --nesterov use nesterov momentum, (default: false) --print-freq N, -p N print frequency (default: 10) --resume PATH path to latest checkpoint (default: none) --pretrained-from-file PATH load weights from here --static-loss-scale STATIC_LOSS_SCALE Static loss scale, positive power of 2 values can improve amp convergence. --dynamic-loss-scale Use dynamic loss scaling. If supplied, this argument supersedes --static-loss-scale. --prof N Run only N iterations --amp Run model AMP (automatic mixed precision) mode. --seed SEED random seed used for numpy and pytorch --gather-checkpoints Gather checkpoints throughout the training, without this flag only best and last checkpoints will be stored --raport-file RAPORT_FILE file in which to store JSON experiment raport --evaluate evaluate checkpoint/model --training-only do not evaluate --no-checkpoints do not store any checkpoints, useful for benchmarking --checkpoint-filename CHECKPOINT_FILENAME --workspace DIR path to directory where checkpoints will be stored --memory-format {nchw,nhwc} memory layout, nchw or nhwc ``` ### Dataset guidelines To use your own dataset, divide it into directories following this scheme: - Training images - `train/<class id>/<image>` - Validation images - `val/<class id>/<image>` If your dataset has a number of classes different from 1000, you need to pass the `--num_classes N` flag to the training script.
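For illustration (an assumption based on the scheme above, not code from this repository), this layout is what standard `torchvision` `ImageFolder`-style readers expect, which makes it easy to sanity-check the class count before passing `--num_classes`:

```python
# Hypothetical sanity check for a custom dataset laid out as
# train/<class id>/<image>; the path below is a placeholder.
from torchvision import datasets, transforms

train_ds = datasets.ImageFolder(
    "/my_dataset/train",
    transform=transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
    ]),
)
# If this is not 1000, pass --num_classes <count> to the training script.
print(f"Found {len(train_ds.classes)} classes and {len(train_ds)} images")
```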
### Training process All the results of the training will be stored in the directory specified with the `--workspace` argument. The script will store: - most recent checkpoint - `checkpoint.pth.tar` (unless the `--no-checkpoints` flag is used). - checkpoint with best validation accuracy - `model_best.pth.tar` (unless the `--no-checkpoints` flag is used). - JSON log - in the file specified with the `--raport-file` flag. Metrics gathered through training: - `train.loss` - training loss - `train.total_ips` - training speed measured in images/second - `train.compute_ips` - training speed measured in images/second, not counting data loading - `train.data_time` - time spent waiting on data - `train.compute_time` - time spent in the forward/backward pass To restart training from a checkpoint, use the `--resume` option. To start training from pretrained weights (e.g. downloaded from NGC), use the `--pretrained-from-file` option. The difference between the two is that pretrained weights contain only model weights, whereas checkpoints also contain the optimizer state and LR scheduler state. Checkpoints are suitable for splitting the training job into shorter stages or for restarting training after an infrastructure failure. Pretrained weights can be used as a base for finetuning the model on a different dataset, or as a backbone for detection models. ### Inference process Validation is done every epoch, and can also be run separately on a checkpointed model. `python ./main.py --arch resnet50 --evaluate --epochs 1 --resume <path to checkpoint> -b <batch size> <path to imagenet>` Metrics gathered through validation: - `val.loss` - validation loss - `val.top1` - validation top1 accuracy - `val.top5` - validation top5 accuracy - `val.total_ips` - inference speed measured in images/second - `val.compute_ips` - inference speed measured in images/second, not counting data loading - `val.data_time` - time spent waiting on data - `val.compute_time` - time spent on inference To run inference on a JPEG image, you first have to extract the model weights from a checkpoint: `python checkpoint2model.py --checkpoint-path <path to checkpoint> --weight-path <path where weights will be stored>` Then run the classification script: `python classify.py --arch resnet50 --pretrained-from-file <path to weights from previous step> --precision AMP|FP32 --image <path to JPEG image>` You can also run ImageNet validation on pretrained weights: `python ./main.py --arch resnet50 --evaluate --epochs 1 --pretrained-from-file <path to pretrained weights> -b <batch size> <path to imagenet>` #### NGC Pretrained weights: Pretrained weights can be downloaded from NGC: ```bash wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/resnet50_pyt_amp/versions/20.06.0/zip -O resnet50_pyt_amp_20.06.0.zip unzip resnet50_pyt_amp_20.06.0.zip ``` To run inference on ImageNet, run: `python ./main.py --arch resnet50 --evaluate --epochs 1 --pretrained-from-file nvidia_resnet50_200821.pth.tar -b <batch size> <path to imagenet>` To run inference on a JPEG image using pretrained weights: `python classify.py --arch resnet50 --pretrained-from-file nvidia_resnet50_200821.pth.tar --precision AMP|FP32 --image <path to JPEG image>` ## Performance The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release.
For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference). ### Benchmarking The following section shows how to run benchmarks measuring the model performance in training and inference modes. #### Training performance benchmark To benchmark training, run: * For 1 GPU * FP32 (V100 GPUs only) `python ./launch.py --model resnet50 --precision FP32 --mode benchmark_training --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * TF32 (A100 GPUs only) `python ./launch.py --model resnet50 --precision TF32 --mode benchmark_training --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * AMP `python ./launch.py --model resnet50 --precision AMP --mode benchmark_training --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * For multiple GPUs * FP32 (V100 GPUs only) `python ./launch.py --model resnet50 --precision FP32 --mode benchmark_training --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * TF32 (A100 GPUs only) `python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision TF32 --mode benchmark_training --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * AMP `python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode benchmark_training --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` Each of these scripts will run 100 iterations and save results in the `benchmark.json` file. #### Inference performance benchmark To benchmark inference, run: * FP32 (V100 GPUs only) `python ./launch.py --model resnet50 --precision FP32 --mode benchmark_inference --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * TF32 (A100 GPUs only) `python ./launch.py --model resnet50 --precision TF32 --mode benchmark_inference --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` * AMP `python ./launch.py --model resnet50 --precision AMP --mode benchmark_inference --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100` Each of these scripts will run 100 iterations and save results in the `benchmark.json` file. ### Results #### Training accuracy results Our results were obtained by running the applicable training script in the pytorch-20.12 NGC container. To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). ##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB) | **Epochs** | **Mixed Precision Top1** | **TF32 Top1** | |:----------:|:------------------------:|:--------------:| | 90 | 77.12 +/- 0.11 | 76.95 +/- 0.18 | | 250 | 78.43 +/- 0.11 | 78.38 +/- 0.17 | ##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB) | **Epochs** | **Mixed Precision Top1** | **FP32 Top1** | |:----------:|:------------------------:|:--------------:| | 90 | 76.88 +/- 0.16 | 77.01 +/- 0.16 | | 250 | 78.25 +/- 0.12 | 78.30 +/- 0.16 | ##### Training accuracy: NVIDIA DGX-2 (16x V100 32GB) | **epochs** | **Mixed Precision Top1** | **FP32 Top1** | |:-:|:-:|:-:| | 50 | 75.81 +/- 0.08 | 76.04 +/- 0.05 | | 90 | 77.10 +/- 0.06 | 77.23 +/- 0.04 | | 250 | 78.59 +/- 0.13 | 78.46 +/- 0.03 | ##### Example plots The following images show a 250 epochs configuration on a DGX-1V. 
![ValidationLoss](./img/loss_plot.png) ![ValidationTop1](./img/top1_plot.png) ![ValidationTop5](./img/top5_plot.png) #### Training performance results Our results were obtained by running the applicable training script in the pytorch-21.03 NGC container. To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). ##### Training performance: NVIDIA DGX A100 (8x A100 80GB) | **GPUs** | **Throughput - TF32** | **Throughput - mixed precision** | **Throughput speedup (TF32 to mixed precision)** | **TF32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **TF32 Training Time (90E)** | |:--------:|:----------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:| | 1 | 938 img/s | 2470 img/s | 2.63 x | 1.0 x | 1.0 x | ~14 hours | ~36 hours | | 8 | 7248 img/s | 16621 img/s | 2.29 x | 7.72 x | 6.72 x | ~3 hours | ~5 hours | ##### Training performance: NVIDIA DGX-1 16GB (8x V100 16GB) | **GPUs** | **Throughput - FP32** | **Throughput - mixed precision** | **Throughput speedup (FP32 to mixed precision)** | **FP32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **FP32 Training Time (90E)** | |:--------:|:----------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:| | 1 | 367 img/s | 1200 img/s | 3.26 x | 1.0 x | 1.0 x | ~29 hours | ~92 hours | | 8 | 2855 img/s | 8322 img/s | 2.91 x | 7.76 x | 6.93 x | ~5 hours | ~12 hours | ##### Training performance: NVIDIA DGX-1 32GB (8x V100 32GB) | **GPUs** | **Throughput - FP32** | **Throughput - mixed precision** | **Throughput speedup (FP32 to mixed precision)** | **FP32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **FP32 Training Time (90E)** | |:--------:|:----------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:| | 1 | 356 img/s | 1156 img/s | 3.24 x | 1.0 x | 1.0 x | ~30 hours | ~95 hours | | 8 | 2766 img/s | 8056 img/s | 2.91 x | 7.75 x | 6.96 x | ~5 hours | ~13 hours | #### Inference performance results Our results were obtained by running the applicable training script in the pytorch-21.03 NGC container. To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide). 
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB) ###### FP32 Inference Latency | **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** | |:--------------:|:------------------:|:---------------:|:---------------:|:---------------:| | 1 | 96 img/s | 10.37 ms | 10.81 ms | 11.73 ms | | 2 | 196 img/s | 10.24 ms | 11.18 ms | 12.89 ms | | 4 | 386 img/s | 10.46 ms | 11.01 ms | 11.75 ms | | 8 | 709 img/s | 11.5 ms | 12.36 ms | 13.12 ms | | 16 | 1023 img/s | 16.07 ms | 15.69 ms | 15.97 ms | | 32 | 1127 img/s | 29.37 ms | 28.53 ms | 28.67 ms | | 64 | 1200 img/s | 55.4 ms | 53.5 ms | 53.71 ms | | 128 | 1229 img/s | 109.26 ms | 104.04 ms | 104.34 ms | | 256 | 1261 img/s | 214.48 ms | 202.51 ms | 202.88 ms | ###### Mixed Precision Inference Latency | **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** | |:--------------:|:------------------:|:---------------:|:---------------:|:---------------:| | 1 | 78 img/s | 12.78 ms | 13.27 ms | 14.36 ms | | 2 | 154 img/s | 13.01 ms | 13.74 ms | 15.19 ms | | 4 | 300 img/s | 13.41 ms | 14.25 ms | 15.68 ms | | 8 | 595 img/s | 13.65 ms | 14.51 ms | 15.6 ms | | 16 | 1178 img/s | 14.0 ms | 15.07 ms | 16.26 ms | | 32 | 2146 img/s | 15.84 ms | 17.25 ms | 18.53 ms | | 64 | 2984 img/s | 23.18 ms | 21.51 ms | 21.93 ms | | 128 | 3249 img/s | 43.55 ms | 39.36 ms | 40.1 ms | | 256 | 3382 img/s | 84.14 ms | 75.3 ms | 80.08 ms | ##### Inference performance: NVIDIA T4 ###### FP32 Inference Latency | **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** | |:--------------:|:------------------:|:---------------:|:---------------:|:---------------:| | 1 | 98 img/s | 10.7 ms | 12.82 ms | 16.71 ms | | 2 | 186 img/s | 11.26 ms | 13.79 ms | 16.99 ms | | 4 | 325 img/s | 12.73 ms | 13.89 ms | 18.03 ms | | 8 | 363 img/s | 22.41 ms | 22.57 ms | 22.9 ms | | 16 | 409 img/s | 39.77 ms | 39.8 ms | 40.23 ms | | 32 | 420 img/s | 77.62 ms | 76.92 ms | 77.28 ms | | 64 | 428 img/s | 152.73 ms | 152.03 ms | 153.02 ms | | 128 | 426 img/s | 309.26 ms | 303.38 ms | 305.13 ms | | 256 | 415 img/s | 635.98 ms | 620.16 ms | 625.21 ms | ###### Mixed Precision Inference Latency | **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** | |:--------------:|:------------------:|:---------------:|:---------------:|:---------------:| | 1 | 79 img/s | 12.96 ms | 15.47 ms | 20.0 ms | | 2 | 156 img/s | 13.18 ms | 14.9 ms | 18.73 ms | | 4 | 317 img/s | 12.99 ms | 14.69 ms | 19.05 ms | | 8 | 652 img/s | 12.82 ms | 16.04 ms | 19.43 ms | | 16 | 1050 img/s | 15.8 ms | 16.57 ms | 20.62 ms | | 32 | 1128 img/s | 29.54 ms | 28.79 ms | 28.97 ms | | 64 | 1165 img/s | 57.41 ms | 55.67 ms | 56.11 ms | | 128 | 1190 img/s | 114.24 ms | 109.17 ms | 110.41 ms | | 256 | 1198 img/s | 225.95 ms | 215.28 ms | 222.94 ms | ## Release notes ### Changelog 1. September 2018 * Initial release 2. January 2019 * Added options Label Smoothing, fan-in initialization, skipping weight decay on batch norm gamma and bias. 3. May 2019 * Cosine LR schedule * MixUp regularization * DALI support * DGX2 configurations * gradients accumulation 4. July 2019 * DALI-CPU dataloader * Updated README 5. July 2020 * Added A100 scripts * Updated README 6. February 2021 * Moved from APEX AMP to Native AMP ### Known issues There are no known issues with this model.
TensorFlow/LanguageModeling/Transformer-XL
Transformer-XL
getdata
# BSD 3-Clause License # # Copyright (c) 2017, # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. echo "=== Acquiring datasets ===" echo "---" mkdir -p data cd data echo "- Downloading WikiText-103 (WT2)" if [[ ! -d 'wikitext-103' ]]; then wget --continue https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-v1.zip unzip -q wikitext-103-v1.zip cd wikitext-103 mv wiki.train.tokens train.txt mv wiki.valid.tokens valid.txt mv wiki.test.tokens test.txt cd .. fi if [[ $1 != 'full' ]]; then exit 0 fi if [[ ! -d 'wikitext-2' ]]; then echo "- Downloading WikiText-2 (WT2)" wget --quiet --continue https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-v1.zip unzip -q wikitext-2-v1.zip cd wikitext-2 mv wiki.train.tokens train.txt mv wiki.valid.tokens valid.txt mv wiki.test.tokens test.txt cd .. fi echo "- Downloading enwik8 (Character)" if [[ ! -d 'enwik8' ]]; then mkdir -p enwik8 cd enwik8 wget --continue http://mattmahoney.net/dc/enwik8.zip wget https://raw.githubusercontent.com/salesforce/awd-lstm-lm/master/data/enwik8/prep_enwik8.py python3 prep_enwik8.py cd .. fi echo "- Downloading text8 (Character)" if [[ ! -d 'text8' ]]; then mkdir -p text8 cd text8 wget --continue http://mattmahoney.net/dc/text8.zip python ../../prep_text8.py cd .. fi echo "- Downloading Penn Treebank (PTB)" if [[ ! -d 'penn' ]]; then wget --quiet --continue http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz tar -xzf simple-examples.tgz mkdir -p penn cd penn mv ../simple-examples/data/ptb.train.txt train.txt mv ../simple-examples/data/ptb.test.txt test.txt mv ../simple-examples/data/ptb.valid.txt valid.txt cd .. echo "- Downloading Penn Treebank (Character)" mkdir -p pennchar cd pennchar mv ../simple-examples/data/ptb.char.train.txt train.txt mv ../simple-examples/data/ptb.char.test.txt test.txt mv ../simple-examples/data/ptb.char.valid.txt valid.txt cd .. rm -rf simple-examples/ fi echo "- Downloading 1B words" if [[ ! 
-d 'one-billion-words' ]]; then mkdir -p one-billion-words cd one-billion-words wget --no-proxy http://www.statmt.org/lm-benchmark/1-billion-word-language-modeling-benchmark-r13output.tar.gz tar xzvf 1-billion-word-language-modeling-benchmark-r13output.tar.gz path="1-billion-word-language-modeling-benchmark-r13output/heldout-monolingual.tokenized.shuffled/" cat ${path}/news.en.heldout-00000-of-00050 > valid.txt cat ${path}/news.en.heldout-00000-of-00050 > test.txt wget https://github.com/rafaljozefowicz/lm/raw/master/1b_word_vocab.txt cd .. fi echo "---" echo "Happy language modeling :)"
TensorFlow2/Recommendation/WideAndDeep/triton/runner
runner
exporter
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pathlib # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = pathlib.Path(__file__).parent.name from .core import Command from .exceptions import RunnerException from .stages import Stage class CommandsExporter: """ Command exported to BASH scripts """ def __init__(self, scripts_dir: pathlib.Path): """ Args: scripts_dir: Paths where scripts should be stored """ self._scripts_dir = scripts_dir def export(self, stage: Stage) -> Command: """ Export stage commands to script and return new command to execute Args: stage: Stage object with commands Returns: Command object with script execution command """ filename = self._get_filename(stage.label) file_path = self._scripts_dir / filename with open(file_path, "w+") as stagefile: stagefile.write("set -x\n") stagefile.write("set -e\n") stagefile.write("export PYTHONUNBUFFERED=1\n") stagefile.write("export PYTHONPATH=`pwd`\n") for command in stage.commands: for line in str(command).split("\n"): stagefile.write(str(line.rstrip())) stagefile.write("\n") stagefile.write("\n") result = os.system(f'ex +"set syn=sh" +"norm gg=G" -cwq {file_path}') if result != 0: raise RunnerException(f"Failed running {filename} script formatting. Exit code {result}") command = Command(f"bash -xe {file_path.as_posix()}") return command def _get_filename(self, label: str): """ Generate filename for script based on label Args: label: String with stage label Returns: String with script filename """ filename = label.replace(" ", "_").lower() filename = f"{filename}.sh" return filename
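The exporter above writes each stage's commands into a bash script (prefixed with `set -x`, `set -e`, and a couple of environment exports) and returns a `bash -xe <script>` command for the runner to execute. Below is a minimal standalone sketch of the same pattern; it deliberately avoids the repository's `Stage`/`Command` classes (whose constructors are not shown here), so the function name and arguments are illustrative only.

```python
# Minimal standalone sketch of the "export commands to a bash script" pattern;
# it does not use the repository's Stage/Command classes.
import pathlib


def export_commands(scripts_dir: pathlib.Path, label: str, commands: list) -> str:
    """Write commands to a bash script and return the command that runs it."""
    scripts_dir.mkdir(parents=True, exist_ok=True)
    script = scripts_dir / f"{label.replace(' ', '_').lower()}.sh"
    with open(script, "w") as f:
        f.write("set -x\nset -e\nexport PYTHONUNBUFFERED=1\n")
        for command in commands:
            f.write(f"{command}\n")
    return f"bash -xe {script.as_posix()}"


print(export_commands(pathlib.Path("./scripts"), "Export Model", ["echo exporting", "echo done"]))
```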
TensorFlow2/Recommendation/WideAndDeep/triton
triton
tf_dataloader
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial import numpy as np import pandas as pd import tensorflow as tf from data.outbrain.features import get_features_keys, MULTIHOT_COLUMNS def prepare_df(df): for multihot_key, value in MULTIHOT_COLUMNS.items(): multihot_col = df.pop(multihot_key) for i in range(value): df[f"{multihot_key}_{i}"] = multihot_col.apply( lambda x: x[i] if len(x) > i else -1 ) df[f"{multihot_key}_nnzs"] = multihot_col.apply(lambda x: len(x)) for col in df.columns: if np.issubdtype(df[col].dtype, np.integer): df[col] = df[col].astype(np.int32) if np.issubdtype(df[col].dtype, np.floating): df[col] = df[col].astype(np.float32) return df def _merge_multihots(*multihots, axis=1): expanded = [tf.expand_dims(multihot, axis) for multihot in multihots] concatenated = tf.concat(expanded, axis) reshaped = tf.reshape(concatenated, [-1]) mask = tf.math.not_equal(reshaped, -1) filtered = tf.boolean_mask(reshaped, mask) return tf.reshape(filtered, [-1, 1]) def _filter_batch(elem): label = elem.pop("clicked") label = tf.reshape(label, [-1, 1]) disp_id = elem.pop("display_id") for multihot_key, value in MULTIHOT_COLUMNS.items(): multihot_values = [elem.pop(f"{multihot_key}_{i}") for i in range(value)] multihot_nnzs = elem.pop(f"{multihot_key}_nnzs") values = _merge_multihots(*multihot_values) row_lengths = multihot_nnzs values = tf.reshape(values, [-1]) row_lengths = tf.reshape(row_lengths, [-1]) x = tf.RaggedTensor.from_row_lengths( values, row_lengths, validate=False ).to_tensor(default_value=-1, shape=[None, value]) elem[f"{multihot_key}"] = x features = get_features_keys() elem = { key: ( tf.reshape(value, [-1, 1]) if "list" not in key else tf.reshape(value, [-1, MULTIHOT_COLUMNS[key]]) ) for key, value in elem.items() if key in features or "list" in key } return elem, label, disp_id def eval_input_fn(files_path, records_batch_size): frames = [] for file in files_path: frames.append(pd.read_parquet(file)) if len(frames) > 1: df = pd.concat(frames) else: df = frames[0] full_df = prepare_df(df) dataset = tf.data.Dataset.from_tensor_slices(dict(full_df)) dataset = dataset.batch(batch_size=records_batch_size, drop_remainder=False) dataset = dataset.map(map_func=partial(_filter_batch), num_parallel_calls=None) dataset = dataset.prefetch(buffer_size=2) return dataset
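`prepare_df` above unrolls every multihot column into a fixed number of slots padded with -1, plus an `_nnzs` column holding the true length. The toy sketch below reproduces that expansion on a small DataFrame; the `MULTIHOT_COLUMNS` mapping used here is an assumption for illustration, the real one comes from `data.outbrain.features`.

```python
# Toy illustration of the multihot expansion performed by prepare_df above.
# MULTIHOT_COLUMNS here is an assumed mapping for the example only.
import pandas as pd

MULTIHOT_COLUMNS = {"topic_id_list": 3}  # hypothetical: pad/truncate to 3 slots

df = pd.DataFrame({"topic_id_list": [[7, 42], [5], [1, 2, 3]]})

for key, width in MULTIHOT_COLUMNS.items():
    col = df.pop(key)
    for i in range(width):
        # slot i holds the i-th value if present, otherwise the -1 padding value
        df[f"{key}_{i}"] = col.apply(lambda x: x[i] if len(x) > i else -1)
    df[f"{key}_nnzs"] = col.apply(len)  # number of real (non-padding) values

print(df)
#    topic_id_list_0  topic_id_list_1  topic_id_list_2  topic_id_list_nnzs
# 0                7               42               -1                   2
# 1                5               -1               -1                   1
# 2                1                2                3                   3
```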
TensorFlow/Segmentation/UNet_Medical/examples
examples
unet_8GPU
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script launches U-Net run in FP32 on 8 GPUs and trains for 6400 iterations with batch_size 8. Usage: # bash unet_FP32_8GPU.sh <path to dataset> <path to results directory> horovodrun -np 8 python main.py --data_dir $1 --model_dir $2 --log_every 100 --max_steps 6400 --batch_size 8 --exec_mode train_and_evaluate --crossvalidation_idx 0 --augment --xla
TensorFlow2/LanguageModeling/BERT/official/utils/data
data
file_io
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Convenience functions for managing dataset file buffers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import atexit import multiprocessing import os import tempfile import uuid import numpy as np import six import tensorflow as tf class _GarbageCollector(object): """Deletes temporary buffer files at exit. Certain tasks (such as NCF Recommendation) require writing buffers to temporary files. (Which may be local or distributed.) It is not generally safe to delete these files during operation, but they should be cleaned up. This class keeps track of temporary files created, and deletes them at exit. """ def __init__(self): self.temp_buffers = [] def register(self, filepath): self.temp_buffers.append(filepath) def purge(self): try: for i in self.temp_buffers: if tf.io.gfile.exists(i): tf.io.gfile.remove(i) tf.compat.v1.logging.info("Buffer file {} removed".format(i)) except Exception as e: tf.compat.v1.logging.error("Failed to cleanup buffer files: {}".format(e)) _GARBAGE_COLLECTOR = _GarbageCollector() atexit.register(_GARBAGE_COLLECTOR.purge) _ROWS_PER_CORE = 50000 def write_to_temp_buffer(dataframe, buffer_folder, columns): if buffer_folder is None: _, buffer_path = tempfile.mkstemp() else: tf.io.gfile.makedirs(buffer_folder) buffer_path = os.path.join(buffer_folder, str(uuid.uuid4())) _GARBAGE_COLLECTOR.register(buffer_path) return write_to_buffer(dataframe, buffer_path, columns) def iter_shard_dataframe(df, rows_per_core=1000): """Two way shard of a dataframe. This function evenly shards a dataframe so that it can be mapped efficiently. It yields a list of dataframes with length equal to the number of CPU cores, with each dataframe having rows_per_core rows. (Except for the last batch which may have fewer rows in the dataframes.) Passing vectorized inputs to a multiprocessing pool is much more effecient than iterating through a dataframe in serial and passing a list of inputs to the pool. Args: df: Pandas dataframe to be sharded. rows_per_core: Number of rows in each shard. Returns: A list of dataframe shards. 
""" n = len(df) num_cores = min([multiprocessing.cpu_count(), n]) num_blocks = int(np.ceil(n / num_cores / rows_per_core)) max_batch_size = num_cores * rows_per_core for i in range(num_blocks): min_index = i * max_batch_size max_index = min([(i + 1) * max_batch_size, n]) df_shard = df[min_index:max_index] n_shard = len(df_shard) boundaries = np.linspace(0, n_shard, num_cores + 1, dtype=np.int64) yield [df_shard[boundaries[j]:boundaries[j+1]] for j in range(num_cores)] def _shard_dict_to_examples(shard_dict): """Converts a dict of arrays into a list of example bytes.""" n = [i for i in shard_dict.values()][0].shape[0] feature_list = [{} for _ in range(n)] for column, values in shard_dict.items(): if len(values.shape) == 1: values = np.reshape(values, values.shape + (1,)) if values.dtype.kind == "i": feature_map = lambda x: tf.train.Feature( int64_list=tf.train.Int64List(value=x)) elif values.dtype.kind == "f": feature_map = lambda x: tf.train.Feature( float_list=tf.train.FloatList(value=x)) else: raise ValueError("Invalid dtype") for i in range(n): feature_list[i][column] = feature_map(values[i]) examples = [ tf.train.Example(features=tf.train.Features(feature=example_features)) for example_features in feature_list ] return [e.SerializeToString() for e in examples] def _serialize_shards(df_shards, columns, pool, writer): """Map sharded dataframes to bytes, and write them to a buffer. Args: df_shards: A list of pandas dataframes. (Should be of similar size) columns: The dataframe columns to be serialized. pool: A multiprocessing pool to serialize in parallel. writer: A TFRecordWriter to write the serialized shards. """ # Pandas does not store columns of arrays as nd arrays. stack remedies this. map_inputs = [{c: np.stack(shard[c].values, axis=0) for c in columns} for shard in df_shards] # Failure within pools is very irksome. Thus, it is better to thoroughly check # inputs in the main process. for inp in map_inputs: # Check that all fields have the same number of rows. assert len(set([v.shape[0] for v in inp.values()])) == 1 for val in inp.values(): assert hasattr(val, "dtype") assert hasattr(val.dtype, "kind") assert val.dtype.kind in ("i", "f") assert len(val.shape) in (1, 2) shard_bytes = pool.map(_shard_dict_to_examples, map_inputs) for s in shard_bytes: for example in s: writer.write(example) def write_to_buffer(dataframe, buffer_path, columns, expected_size=None): """Write a dataframe to a binary file for a dataset to consume. Args: dataframe: The pandas dataframe to be serialized. buffer_path: The path where the serialized results will be written. columns: The dataframe columns to be serialized. expected_size: The size in bytes of the serialized results. This is used to lazily construct the buffer. Returns: The path of the buffer. """ if (tf.io.gfile.exists(buffer_path) and tf.io.gfile.stat(buffer_path).length > 0): actual_size = tf.io.gfile.stat(buffer_path).length if expected_size == actual_size: return buffer_path tf.compat.v1.logging.warning( "Existing buffer {} has size {}. Expected size {}. 
Deleting and " "rebuilding buffer.".format(buffer_path, actual_size, expected_size)) tf.io.gfile.remove(buffer_path) if dataframe is None: raise ValueError( "dataframe was None but a valid existing buffer was not found.") tf.io.gfile.makedirs(os.path.split(buffer_path)[0]) tf.compat.v1.logging.info("Constructing TFRecordDataset buffer: {}" .format(buffer_path)) count = 0 pool = multiprocessing.Pool(multiprocessing.cpu_count()) try: with tf.io.TFRecordWriter(buffer_path) as writer: for df_shards in iter_shard_dataframe(df=dataframe, rows_per_core=_ROWS_PER_CORE): _serialize_shards(df_shards, columns, pool, writer) count += sum([len(s) for s in df_shards]) tf.compat.v1.logging.info("{}/{} examples written." .format(str(count).ljust(8), len(dataframe))) finally: pool.terminate() tf.compat.v1.logging.info("Buffer write complete.") return buffer_path
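`iter_shard_dataframe` above cuts a DataFrame into blocks of `num_cores * rows_per_core` rows and splits each block evenly across cores. The sketch below reimplements that sharding arithmetic standalone (so it runs without the repository on `PYTHONPATH`) and prints the shard sizes it would hand to the worker pool.

```python
# Standalone sketch of the sharding logic used by iter_shard_dataframe above,
# reimplemented here so the example is self-contained.
import multiprocessing

import numpy as np
import pandas as pd


def iter_shards(df, rows_per_core=4):
    n = len(df)
    num_cores = min(multiprocessing.cpu_count(), n)
    max_batch_size = num_cores * rows_per_core
    for start in range(0, n, max_batch_size):
        block = df[start:start + max_batch_size]
        # split the block into num_cores roughly equal positional slices
        bounds = np.linspace(0, len(block), num_cores + 1, dtype=np.int64)
        yield [block[bounds[j]:bounds[j + 1]] for j in range(num_cores)]


df = pd.DataFrame({"x": np.arange(25)})
for i, shards in enumerate(iter_shards(df)):
    print(f"block {i}: shard sizes = {[len(s) for s in shards]}")
```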
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling/backbone
backbone
resnet
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. """ Variant of the resnet module that takes cfg as an argument. Example usage. Strings may be specified in the config file. model = ResNet( "StemWithFixedBatchNorm", "BottleneckWithFixedBatchNorm", "ResNet50StagesTo4", ) OR: model = ResNet( "StemWithGN", "BottleneckWithGN", "ResNet50StagesTo4", ) Custom implementations may be written in user code and hooked in via the `register_*` functions. """ from collections import namedtuple import torch import torch.nn.functional as F from torch import nn from maskrcnn_benchmark.layers import FrozenBatchNorm2d from maskrcnn_benchmark.layers import Conv2d from maskrcnn_benchmark.modeling.make_layers import group_norm from maskrcnn_benchmark.utils.registry import Registry # ResNet stage specification StageSpec = namedtuple( "StageSpec", [ "index", # Index of the stage, eg 1, 2, ..,. 5 "block_count", # Numer of residual blocks in the stage "return_features", # True => return the last feature map from this stage ], ) # ----------------------------------------------------------------------------- # Standard ResNet models # ----------------------------------------------------------------------------- # ResNet-50 (including all stages) ResNet50StagesTo5 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, False), (4, 3, True)) ) # ResNet-50 up to stage 4 (excludes stage 5) ResNet50StagesTo4 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 6, True)) ) # ResNet-101 (including all stages) ResNet101StagesTo5 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 23, False), (4, 3, True)) ) # ResNet-101 up to stage 4 (excludes stage 5) ResNet101StagesTo4 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, False), (2, 4, False), (3, 23, True)) ) # ResNet-50-FPN (including all stages) ResNet50FPNStagesTo5 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 6, True), (4, 3, True)) ) # ResNet-101-FPN (including all stages) ResNet101FPNStagesTo5 = tuple( StageSpec(index=i, block_count=c, return_features=r) for (i, c, r) in ((1, 3, True), (2, 4, True), (3, 23, True), (4, 3, True)) ) class ResNet(nn.Module): def __init__(self, cfg): super(ResNet, self).__init__() # If we want to use the cfg in forward(), then we should make a copy # of it and store it for later use: # self.cfg = cfg.clone() # Translate string names to implementations stem_module = _STEM_MODULES[cfg.MODEL.RESNETS.STEM_FUNC] stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.CONV_BODY] transformation_module = _TRANSFORMATION_MODULES[cfg.MODEL.RESNETS.TRANS_FUNC] # Construct the stem module self.stem = stem_module(cfg) # Constuct the specified ResNet stages num_groups = cfg.MODEL.RESNETS.NUM_GROUPS width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS stage2_bottleneck_channels = num_groups * width_per_group stage2_out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS self.stages = [] self.return_features = {} for stage_spec in stage_specs: name = "layer" + str(stage_spec.index) stage2_relative_factor = 2 ** (stage_spec.index - 1) bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor out_channels = stage2_out_channels * 
stage2_relative_factor module = _make_stage( transformation_module, in_channels, bottleneck_channels, out_channels, stage_spec.block_count, num_groups, cfg.MODEL.RESNETS.STRIDE_IN_1X1, first_stride=int(stage_spec.index > 1) + 1, ) in_channels = out_channels self.add_module(name, module) self.stages.append(name) self.return_features[name] = stage_spec.return_features # Optionally freeze (requires_grad=False) parts of the backbone self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT) def _freeze_backbone(self, freeze_at): if freeze_at < 0: return for stage_index in range(freeze_at): if stage_index == 0: m = self.stem # stage 0 is the stem else: m = getattr(self, "layer" + str(stage_index)) for p in m.parameters(): p.requires_grad = False def forward(self, x): outputs = [] x = self.stem(x) for stage_name in self.stages: x = getattr(self, stage_name)(x) if self.return_features[stage_name]: outputs.append(x) return outputs class ResNetHead(nn.Module): def __init__( self, block_module, stages, num_groups=1, width_per_group=64, stride_in_1x1=True, stride_init=None, res2_out_channels=256, dilation=1 ): super(ResNetHead, self).__init__() stage2_relative_factor = 2 ** (stages[0].index - 1) stage2_bottleneck_channels = num_groups * width_per_group out_channels = res2_out_channels * stage2_relative_factor in_channels = out_channels // 2 bottleneck_channels = stage2_bottleneck_channels * stage2_relative_factor block_module = _TRANSFORMATION_MODULES[block_module] self.stages = [] stride = stride_init for stage in stages: name = "layer" + str(stage.index) if not stride: stride = int(stage.index > 1) + 1 module = _make_stage( block_module, in_channels, bottleneck_channels, out_channels, stage.block_count, num_groups, stride_in_1x1, first_stride=stride, dilation=dilation ) stride = None self.add_module(name, module) self.stages.append(name) def forward(self, x): for stage in self.stages: x = getattr(self, stage)(x) return x def _make_stage( transformation_module, in_channels, bottleneck_channels, out_channels, block_count, num_groups, stride_in_1x1, first_stride, dilation=1 ): blocks = [] stride = first_stride for _ in range(block_count): blocks.append( transformation_module( in_channels, bottleneck_channels, out_channels, num_groups, stride_in_1x1, stride, dilation=dilation ) ) stride = 1 in_channels = out_channels return nn.Sequential(*blocks) class Bottleneck(nn.Module): def __init__( self, in_channels, bottleneck_channels, out_channels, num_groups, stride_in_1x1, stride, dilation, norm_func ): super(Bottleneck, self).__init__() self.downsample = None if in_channels != out_channels: down_stride = stride if dilation == 1 else 1 self.downsample = nn.Sequential( Conv2d( in_channels, out_channels, kernel_size=1, stride=down_stride, bias=False ), norm_func(out_channels), ) for modules in [self.downsample,]: for l in modules.modules(): if isinstance(l, Conv2d): nn.init.kaiming_uniform_(l.weight, a=1) if dilation > 1: stride = 1 # reset to be 1 # The original MSRA ResNet models have stride in the first 1x1 conv # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have # stride in the 3x3 conv stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride) self.conv1 = Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, ) self.bn1 = norm_func(bottleneck_channels) # TODO: specify init for the above self.conv2 = Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=dilation, bias=False, groups=num_groups, 
dilation=dilation ) self.bn2 = norm_func(bottleneck_channels) self.conv3 = Conv2d( bottleneck_channels, out_channels, kernel_size=1, bias=False ) self.bn3 = norm_func(out_channels) for l in [self.conv1, self.conv2, self.conv3,]: nn.init.kaiming_uniform_(l.weight, a=1) def forward(self, x): identity = x out = self.conv1(x) out = self.bn1(out) out = F.relu_(out) out = self.conv2(out) out = self.bn2(out) out = F.relu_(out) out0 = self.conv3(out) out = self.bn3(out0) if self.downsample is not None: identity = self.downsample(x) out += identity out = F.relu_(out) return out class BaseStem(nn.Module): def __init__(self, cfg, norm_func): super(BaseStem, self).__init__() out_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS self.conv1 = Conv2d( 3, out_channels, kernel_size=7, stride=2, padding=3, bias=False ) self.bn1 = norm_func(out_channels) for l in [self.conv1,]: nn.init.kaiming_uniform_(l.weight, a=1) def forward(self, x): x = self.conv1(x.float()) x = self.bn1(x) x = F.relu_(x) x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1) return x class BottleneckWithFixedBatchNorm(Bottleneck): def __init__( self, in_channels, bottleneck_channels, out_channels, num_groups=1, stride_in_1x1=True, stride=1, dilation=1 ): super(BottleneckWithFixedBatchNorm, self).__init__( in_channels=in_channels, bottleneck_channels=bottleneck_channels, out_channels=out_channels, num_groups=num_groups, stride_in_1x1=stride_in_1x1, stride=stride, dilation=dilation, norm_func=FrozenBatchNorm2d ) class StemWithFixedBatchNorm(BaseStem): def __init__(self, cfg): super(StemWithFixedBatchNorm, self).__init__( cfg, norm_func=FrozenBatchNorm2d ) class BottleneckWithGN(Bottleneck): def __init__( self, in_channels, bottleneck_channels, out_channels, num_groups=1, stride_in_1x1=True, stride=1, dilation=1 ): super(BottleneckWithGN, self).__init__( in_channels=in_channels, bottleneck_channels=bottleneck_channels, out_channels=out_channels, num_groups=num_groups, stride_in_1x1=stride_in_1x1, stride=stride, dilation=dilation, norm_func=group_norm ) class StemWithGN(BaseStem): def __init__(self, cfg): super(StemWithGN, self).__init__(cfg, norm_func=group_norm) _TRANSFORMATION_MODULES = Registry({ "BottleneckWithFixedBatchNorm": BottleneckWithFixedBatchNorm, "BottleneckWithGN": BottleneckWithGN, }) _STEM_MODULES = Registry({ "StemWithFixedBatchNorm": StemWithFixedBatchNorm, "StemWithGN": StemWithGN, }) _STAGE_SPECS = Registry({ "R-50-C4": ResNet50StagesTo4, "R-50-C5": ResNet50StagesTo5, "R-101-C4": ResNet101StagesTo4, "R-101-C5": ResNet101StagesTo5, "R-50-FPN": ResNet50FPNStagesTo5, "R-101-FPN": ResNet101FPNStagesTo5, })
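In `ResNet.__init__` above, every stage scales the stage-2 widths by `2 ** (index - 1)`. The sketch below walks that bookkeeping for the R-50 stage specs using commonly used defaults (`NUM_GROUPS=1`, `WIDTH_PER_GROUP=64`, `RES2_OUT_CHANNELS=256`); in the real model these values come from `cfg.MODEL.RESNETS`, so treat the numbers as an assumption-based illustration.

```python
# Quick sketch of the channel bookkeeping in ResNet.__init__ above, under the
# assumed defaults num_groups=1, width_per_group=64, RES2_OUT_CHANNELS=256.
num_groups, width_per_group, res2_out_channels = 1, 64, 256
stage2_bottleneck = num_groups * width_per_group

for index, block_count in [(1, 3), (2, 4), (3, 6), (4, 3)]:  # R-50 stage specs
    factor = 2 ** (index - 1)
    print(f"layer{index}: blocks={block_count}, "
          f"bottleneck={stage2_bottleneck * factor}, "
          f"out_channels={res2_out_channels * factor}")
# layer1: blocks=3, bottleneck=64,  out_channels=256
# layer2: blocks=4, bottleneck=128, out_channels=512
# layer3: blocks=6, bottleneck=256, out_channels=1024
# layer4: blocks=3, bottleneck=512, out_channels=2048
```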
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util
util
pluginBuilder
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef TT2I_PLUGINBUILDER_H #define TT2I_PLUGINBUILDER_H #include "NvInfer.h" #include "trtPtr.h" #include <cstdint> #include <memory> #include <string> #include <vector> namespace tts { class PluginBuilder { public: /** * @brief Create a new PluginBuilder class. * * @param pluginName The name of the plugin. * @param pluginVersion The version of the plugin. */ PluginBuilder(const std::string& pluginName, const std::string& pluginVersion); // delete copy constructor and assignment operator PluginBuilder(const PluginBuilder& other) = delete; PluginBuilder& operator=(const PluginBuilder& other) = delete; /** * @brief Add a field to the plugin. * * @param name The name of the field. * @param data The data for the field. * @param length The length of the field. */ void setField(const std::string& name, const nvinfer1::Weights& weights); /** * @brief Add a scalar field to the plugin. * * @param name The name of the field. * @param value The value of the field. */ void setField(const std::string& name, int32_t value); /** * @brief Add a scalar field to the plugin. * * @param name The name of the field. * @param value The value of the field. */ void setField(const std::string& name, float value); /** * @brief Build the plugin instance. * * @param name The name of the instance. * * @return The instantiated plugin. */ TRTPtr<nvinfer1::IPluginV2> make(const std::string& name); private: union scalar_t { int32_t i; float f; scalar_t(const int32_t value) : i(value) { } scalar_t(const float value) : f(value) { } }; nvinfer1::IPluginCreator* mCreator; std::vector<nvinfer1::PluginField> mFields; // use a set of unique_ptr's, so that as the array is expanded and // reallocated, the memory addresses of the scalars do not change. std::vector<std::unique_ptr<std::string>> mNames; std::vector<std::unique_ptr<scalar_t>> mScalars; /** * @brief Set a field to the plugin. * * @param field The field to set. */ void setField(const nvinfer1::PluginField& field); }; } // namespace tts #endif
PyTorch/SpeechSynthesis/FastPitch/triton/scripts
scripts
process_dataset
#!/usr/bin/env bash # Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e DATASET_DIR="${DATASETS_DIR}/LJSpeech-1.1/LJSpeech-1.1_fastpitch" : ${F0_METHOD:="pyin"} : ${ARGS="--extract-mels"} if [ ! -d "${DATASET_DIR}/mels" ]; then python prepare_dataset.py \ --wav-text-filelists filelists/ljs_audio_text_val.txt \ --n-workers 16 \ --batch-size 1 \ --dataset-path $DATASET_DIR \ --extract-pitch \ --f0-method $F0_METHOD \ $ARGS fi
Tools/PyTorch/TimeSeriesPredictionPlatform/loggers
loggers
log_helper
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import json import pandas as pd import dllogger from dllogger import JSONStreamBackend, Logger, StdOutBackend from .backends import AggregatorBackend, TensorBoardBackend, AverageMeter from omegaconf import OmegaConf from distributed_utils import is_main_process def jsonlog_2_df(path, keys): with open(path, 'r') as f: log = [json.loads(l[4:]) for l in f.readlines()] log = [l for l in log if l['type'] == 'LOG' and isinstance(l['step'], (int, list))] assert log[-1]['step'] == [], "Logfile is corrupted" log[-1]['step']=log[-2]['step'] # Every log ends with step == [] log = [ { **{k:v for k,v in l.items() if not isinstance(v, dict)}, **(l['data'] if 'data' in l else {}), 'timestamp':float(l['timestamp'])*1000 } for l in log ] log = [{k:v for k,v in l.items() if k in keys} for l in log] df = pd.DataFrame(log) df = df.groupby('step').mean() return df def empty_step_format(step): return "" def empty_prefix_format(timestamp): return "" def no_string_metric_format(metric, metadata, value): unit = metadata["unit"] if "unit" in metadata.keys() else "" format = "{" + metadata["format"] + "}" if "format" in metadata.keys() else "{}" if metric == "String": return "{} {}".format(format.format(value) if value is not None else value, unit) return "{} : {} {}".format(metric, format.format(value) if value is not None else value, unit) def setup_logger(config, resume_training=False): log_filename = config.get("log_filename", "log.json") if is_main_process(): backends = [ TensorBoardBackend(verbosity=dllogger.Verbosity.VERBOSE), JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=log_filename, append=True), AggregatorBackend(verbosity=dllogger.Verbosity.VERBOSE, agg_dict={"loss": AverageMeter}), StdOutBackend( verbosity=dllogger.Verbosity.DEFAULT, step_format=empty_step_format, metric_format=no_string_metric_format, prefix_format=empty_prefix_format, ), ] logger = Logger(backends=backends) else: logger = Logger(backends=[]) container_setup_info = get_framework_env_vars() logger.log(step="PARAMETER", data=container_setup_info, verbosity=dllogger.Verbosity.VERBOSE) if not resume_training: logger.metadata("loss", {"unit": "nat", "GOAL": "MINIMIZE", "STAGE": "TRAIN"}) logger.metadata("val_loss", {"unit": "nat", "GOAL": "MINIMIZE", "STAGE": "VAL"}) return logger def restart_logger(config, logger): """An utility function to nealty close every backend holding resources""" for b in logger.backends: if hasattr(b, 'close'): b.close() return setup_logger(config, resume_training=True) def log_parameters(logger, config): model_config = flatten_config(config.model) trainer_config = flatten_config(config.trainer) additional_fields = {'seed': config.seed} logger.log(step="PARAMETER", data={**model_config, **trainer_config, **additional_fields}, verbosity=dllogger.Verbosity.VERBOSE) def flatten_config(config): config = OmegaConf.to_container(config, resolve=True) if '_target_' in config: del 
config['_target_'] if 'config' in config: c = config['config'] config = {**c, **config} del config['config'] config = pd.json_normalize(config, sep='.') config = config.to_dict(orient='records')[0] return config def get_framework_env_vars(): return { "NVIDIA_PYTORCH_VERSION": os.environ.get("NVIDIA_PYTORCH_VERSION"), "PYTORCH_VERSION": os.environ.get("PYTORCH_VERSION"), "CUBLAS_VERSION": os.environ.get("CUBLAS_VERSION"), "NCCL_VERSION": os.environ.get("NCCL_VERSION"), "CUDA_DRIVER_VERSION": os.environ.get("CUDA_DRIVER_VERSION"), "CUDNN_VERSION": os.environ.get("CUDNN_VERSION"), "CUDA_VERSION": os.environ.get("CUDA_VERSION"), "NVIDIA_PIPELINE_ID": os.environ.get("NVIDIA_PIPELINE_ID"), "NVIDIA_BUILD_ID": os.environ.get("NVIDIA_BUILD_ID"), "NVIDIA_TF32_OVERRIDE": os.environ.get("NVIDIA_TF32_OVERRIDE"), }
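`flatten_config` above relies on `pandas.json_normalize(..., sep='.')` to turn a nested Hydra/OmegaConf config into flat dotted keys before logging. The sketch below shows that flattening step standalone on a plain dict; the config contents are made up for illustration.

```python
# Toy illustration of the config flattening used by flatten_config above,
# shown on a plain dict instead of an OmegaConf object. The config values
# are invented for the example.
import pandas as pd

config = {"optimizer": {"lr": 1e-3, "betas": [0.9, 0.99]}, "hidden_size": 256}
flat = pd.json_normalize(config, sep=".").to_dict(orient="records")[0]
print(flat)
# prints a flat dict with keys 'hidden_size', 'optimizer.lr' and 'optimizer.betas'
```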
TensorFlow2/Recommendation/DLRM_and_DCNv2/tensorflow-dot-based-interact/tensorflow_dot_based_interact/python
python
__init__
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #
TensorFlow2/Classification/ConvNets
ConvNets
README
This repository supports

- EfficientNet v1-B0 and v1-B4. Please see [here](efficientnet_v1/README.md) for the complete guide.
- EfficientNet v2-S. Please see [here](efficientnet_v2/README.md) for the complete guide.
PyTorch/Translation/Transformer/fairseq/modules/strided_batched_gemm
strided_batched_gemm
strided_batched_gemm_cuda
#pragma once #include <iostream> #include <vector> #include <cuda.h> #include <cuda_fp16.h> #include <cuda_profiler_api.h> #include <cuda_runtime.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> #include "cutlass/cutlass.h" #include "cutlass/gemm/gemm.h" #include "cutlass/gemm/wmma_gemm_traits.h" namespace { cublasOperation_t convertTransToCublasOperation(char trans) { if (trans == 't') return CUBLAS_OP_T; else if (trans == 'n') return CUBLAS_OP_N; else if (trans == 'c') return CUBLAS_OP_C; else { AT_ERROR("trans must be one of: t, n, c"); return CUBLAS_OP_T; } } void CublasStridedBatchedGemm( char transa, char transb, long m, long n, long k, float alpha, const half *a, long lda, long strideA, const half *b, long ldb, long strideB, float beta, half *c, long ldc, long strideC, long batchCount, cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP) { cublasOperation_t opa = convertTransToCublasOperation(transa); cublasOperation_t opb = convertTransToCublasOperation(transb); cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle(); cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream(); cublasSetStream(handle, stream); float fAlpha = alpha; float fBeta = beta; // THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH)); TORCH_CUDABLAS_CHECK(cublasGemmStridedBatchedEx( handle, opa, opb, (int)m, (int)n, (int)k, (void *)&fAlpha, a, CUDA_R_16F, (int)lda, strideA, b, CUDA_R_16F, (int)ldb, strideB, (void *)&fBeta, c, CUDA_R_16F, (int)ldc, strideC, (int)batchCount, CUDA_R_32F, algo)); // THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH)); } } // namespace template <cutlass::MatrixLayout::Kind A_LAYOUT, cutlass::MatrixLayout::Kind B_LAYOUT, int SRC_A, int SRC_B, int DST_C> void CutlassGemm_FP32Accum(cudaStream_t stream, long m, long n, long k, float alpha, const half *a, long lda, long strideA, const half *b, long ldb, long strideB, float beta, half *c, long ldc, long strideC, long batchCount) { // printf("CUTLASS-> %c%c M: %ld N: %ld K: %ld %d%d%d LDA: %ld LDB: %ld LDC: // %ld strideA: %ld strideB: %ld strideC: %ld Alpha: %f Beta: %f\n", // ((int)A_LAYOUT == 0 ? 'T' : 'N'), ((int)B_LAYOUT ==0 ? 
'T' : 'N'), m, n, k, // SRC_A,SRC_B,DST_C, lda, ldb, ldc, strideA, strideB, strideC, alpha, beta); typedef cutlass::gemm::WmmaGemmTraits< A_LAYOUT, B_LAYOUT, cutlass::Shape<32, 16, 16>, half, half, half, cutlass::gemm::LinearScaling<float>, float, typename cutlass::gemm::WmmaGemmAccumulatorsPerWarp< typename cutlass::Shape<32, 16, 16>>::Shape, typename cutlass::Shape<16, 16, 16>, SRC_A, // kScalarsPerLdgA_ SRC_B, // kScalarsPerLdgB_ SRC_A, // KScalarsPerLdsA_ SRC_B, // KScalarsPerLdsB_ DST_C, // kScalarsPerLdgCAndStgD_ DST_C / 2, // kScalarsPerStsD_ DST_C / 2 // kScalarsPerLdsD_ > WmmaGemmTraits; typedef cutlass::gemm::Gemm<WmmaGemmTraits> Gemm; typename Gemm::Params params; int result = params.initialize( m, // M dimension for each batch n, // N dimension for each batch k, // K dimension for each batch alpha, // scalar alpha a, lda, strideA, // distance in memory between the first element of neighboring // batch b, ldb, strideB, // distance in memory between the first element of neighboring // batch beta, // scalar beta c, // source matrix C ldc, strideC, // distance in memory between the first element of neighboring // batch c, // destination matrix C (may be different memory than source C matrix) ldc, strideC, // distance in memory between the first element of neighboring // batch batchCount); AT_ASSERTM(result == 0, "Failed to initialize CUTLASS Gemm::Params object."); // batchCount in cutlass batched GEMM kernels maps to gridDim.z, which is // limited to 16 bits. To implement batched GEMM with larger batch size, we // fragment it into smaller batched GEMMs of gridDim.z <= 64k long batchesLeft = batchCount; long iterBatchCount = std::min(batchesLeft, static_cast<long>((1 << 16) - 1)); do { // printf("CUTLASS-> %c%c M: %ld N: %ld K: %ld %d%d%d LDA: %ld LDB: %ld LDC: // %ld strideA: %ld strideB: %ld strideC: %ld Alpha: %f Beta: %f // TotalBatches: %ld iterBatchCount %ld\n", ((int)A_LAYOUT == 0 ? 'T' : 'N'), // ((int)B_LAYOUT ==0 ? 'T' : 'N'), m, n, k, SRC_A,SRC_B,DST_C, lda, ldb, // ldc, strideA, strideB, strideC, alpha, beta, batchesLeft, iterBatchCount); int result = params.initialize(m, // M dimension for each batch n, // N dimension for each batch k, // K dimension for each batch alpha, // scalar alpha a, lda, strideA, // distance in memory between the first // element of neighboring batch b, ldb, strideB, // distance in memory between the first // element of neighboring batch beta, // scalar beta c, // source matrix C ldc, strideC, // distance in memory between the first // element of neighboring batch c, // destination matrix C (may be different memory // than source C matrix) ldc, strideC, // distance in memory between the first // element of neighboring batch iterBatchCount); AT_ASSERTM(result == 0, "Failed to initialize CUTLASS Gemm::Params object."); // Launch the CUTLASS GEMM kernel. 
C10_CUDA_CHECK(Gemm::launch(params, stream)); // Update batched GEMM params based on completed work batchesLeft = batchesLeft - iterBatchCount; a += iterBatchCount * strideA; b += iterBatchCount * strideB; c += iterBatchCount * strideC; ; iterBatchCount = std::min(batchesLeft, static_cast<long>((1 << 16) - 1)); } while (batchesLeft > 0); } namespace { void gemm_switch_fp32accum(char transa, char transb, long m, long n, long k, float alpha, const half *a, long lda, long strideA, const half *b, long ldb, long strideB, float beta, half *c, long ldc, long strideC, long batchCount) { auto stream = c10::cuda::getCurrentCUDAStream(); // printf("GEMM -> %c%c M: %i N: %i K: %i Alpha: %f Beta: %f\n", (transa == // 't' ? 'T' : 'N'), (transb =='t' ? 'T' : 'N'), m, n, k, alpha, beta); if ((transa == 't') && (transb == 'n')) { if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x7)) { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount, CUBLAS_GEMM_ALGO0_TENSOR_OP); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, 
strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 8>( stream, m, n, k, 
alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kRowMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } } else if ((transa == 'n') && (transb == 'n')) { if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x7)) { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount, CUBLAS_GEMM_ALGO0_TENSOR_OP); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 8, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, 
strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 4, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 8>( 
stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kColumnMajor, 2, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } } else if ((transa == 'n') && (transb == 't')) { if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x7)) { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount, CUBLAS_GEMM_ALGO0_TENSOR_OP); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x7) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 8, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, 
batchCount); } else if (!(lda & 0x3) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x3) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 4, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 8, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 8, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x7) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 8, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 4, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 4, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x3) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 4, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x7)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 2, 8>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x3)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 2, 4>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, 
strideB, beta, c, ldc, strideC, batchCount); } else if (!(lda & 0x1) && !(ldb & 0x1) && !(ldc & 0x1)) { CutlassGemm_FP32Accum<cutlass::MatrixLayout::kColumnMajor, cutlass::MatrixLayout::kRowMajor, 2, 2, 2>( stream, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } else { CublasStridedBatchedGemm(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } } else { AT_ASSERTM(false, "TransA and TransB are invalid"); } } void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc) { int transa_ = ((transa == 't') || (transa == 'T')); int transb_ = ((transb == 't') || (transb == 'T')); // Note: leading dimensions generally are checked that they are > 0 and at // least as big the result requires (even if the value won't be used). if (n <= 1) *ldc = std::max<int64_t>(m, 1); if (transa_) { if (m <= 1) *lda = std::max<int64_t>(k, 1); } else { if (k <= 1) *lda = std::max<int64_t>(m, 1); } if (transb_) { if (k <= 1) *ldb = std::max<int64_t>(n, 1); } else { if (n <= 1) *ldb = std::max<int64_t>(k, 1); } } void HgemmStridedBatched(char transa, char transb, long m, long n, long k, float alpha, const half *a, long lda, long strideA, const half *b, long ldb, long strideB, float beta, half *c, long ldc, long strideC, long batchCount) { if ((m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX)) { AT_ERROR("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, " "batchCount" "with the bound [val] <= %d", INT_MAX); } adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc); gemm_switch_fp32accum(transa, transb, m, n, k, alpha, a, lda, strideA, b, ldb, strideB, beta, c, ldc, strideC, batchCount); } } // namespace at::Tensor strided_batched_gemm_cuda( float beta, at::Tensor in_result, float alpha, at::Tensor batch1, at::Tensor batch2) { bool transpose_result; char transpose_batch1, transpose_batch2; int64_t lda, ldb, ldc; at::Tensor result, input1, input2; if (in_result.stride(1) == 1) { transpose_result = false; result = in_result; ldc = result.stride(2); } else if (in_result.stride(2) == 1) { transpose_result = true; at::Tensor swap = batch2; batch2 = batch1; batch1 = swap; result = in_result; ldc = result.stride(1); } else { AT_ASSERTM(false, "result should be contiguous"); } if (batch1.stride(transpose_result ? 2 : 1) == 1 && batch1.stride(transpose_result ? 1 : 2) != 0) { transpose_batch1 = 'n'; input1 = batch1; lda = input1.stride(transpose_result ? 1 : 2); } else if (batch1.stride(transpose_result ? 1 : 2) == 1 && batch1.stride(transpose_result ? 2 : 1) != 0) { transpose_batch1 = 't'; input1 = batch1; lda = input1.stride(transpose_result ? 2 : 1); } else { AT_ASSERTM(false, "input1 should be contiguous"); } if (batch2.stride(transpose_result ? 2 : 1) == 1 && batch2.stride(transpose_result ? 1 : 2) != 0) { transpose_batch2 = 'n'; input2 = batch2; ldb = input2.stride(transpose_result ? 1 : 2); } else if (batch2.stride(transpose_result ? 1 : 2) == 1 && batch2.stride(transpose_result ? 2 : 1) != 0) { transpose_batch2 = 't'; input2 = batch2; ldb = input2.stride(transpose_result ? 2 : 1); } else { AT_ASSERTM(false, "input2 should be contiguous"); } int64_t num_batches = result.size(0); HgemmStridedBatched( transpose_batch1, transpose_batch2, result.size(transpose_result ? 2 : 1), result.size(transpose_result ? 1 : 2), input1.size(transpose_result ? 
1 : 2), alpha, static_cast<const half*>(input1.data_ptr()), lda, input1.stride(0), static_cast<const half*>(input2.data_ptr()), ldb, input2.stride(0), beta, static_cast<half*>(result.data_ptr()), ldc, result.stride(0), num_batches); return in_result; }
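The dispatch above simply picks a CUTLASS tile configuration from how many fp16 elements the leading dimensions lda/ldb/ldc are divisible by (the & 0x7 / 0x3 / 0x1 masks test divisibility by 8, 4 and 2) and falls back to cuBLAS when no aligned path applies. Numerically, strided_batched_gemm_cuda(beta, in_result, alpha, batch1, batch2) writes beta * in_result + alpha * batch1 @ batch2 in half precision with fp32 accumulation, so a plain PyTorch baddbmm is a convenient reference when smoke-testing a build. A minimal sketch, assuming the file is compiled into a hypothetical extension module named strided_batched_gemm_ext:

import torch

# Hypothetical module name for the compiled extension exposing the function above.
# import strided_batched_gemm_ext

# Requires a CUDA device; shapes are arbitrary but must be batched-GEMM compatible.
beta, alpha = 1.0, 0.5
in_result = torch.randn(8, 64, 32, device="cuda", dtype=torch.half)
batch1 = torch.randn(8, 64, 128, device="cuda", dtype=torch.half)
batch2 = torch.randn(8, 128, 32, device="cuda", dtype=torch.half)

# Pure-PyTorch reference for the same math: beta * in_result + alpha * (batch1 @ batch2)
reference = torch.baddbmm(in_result, batch1, batch2, beta=beta, alpha=alpha)

# The extension updates in_result in place and returns it; compare against the reference:
# out = strided_batched_gemm_ext.strided_batched_gemm_cuda(
#     beta, in_result.clone(), alpha, batch1, batch2)
# torch.testing.assert_close(out, reference, rtol=1e-2, atol=1e-2)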
PyTorch/LanguageModeling/Transformer-XL/pytorch
pytorch
mem_transformer
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn import torch.nn.functional as F from utils.log_uniform_sampler import LogUniformSampler from utils.log_uniform_sampler import sample_logits from utils.proj_adaptive_softmax import ProjectedAdaptiveLogSoftmax @torch.jit.script def add_and_scale(tensor1, tensor2, alpha: float): return alpha * (tensor1 + tensor2) class PositionalEmbedding(nn.Module): def __init__(self, demb): super(PositionalEmbedding, self).__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.ger(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) if bsz is not None: return pos_emb[:, None, :].expand(-1, bsz, -1) else: return pos_emb[:, None, :] class PositionwiseFF(nn.Module): def __init__(self, d_model, d_inner, dropout, pre_lnorm=False): super(PositionwiseFF, self).__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential( nn.Linear(d_model, d_inner), nn.ReLU(inplace=True), nn.Dropout(dropout), nn.Linear(d_inner, d_model), nn.Dropout(dropout), ) self.layer_norm = nn.LayerNorm(d_model) self.pre_lnorm = pre_lnorm def forward(self, inp): if self.pre_lnorm: # layer normalization + positionwise feed-forward core_out = self.CoreNet(self.layer_norm(inp)) # residual connection output = core_out + inp else: # positionwise feed-forward core_out = self.CoreNet(inp) # residual connection + layer normalization output = self.layer_norm(inp + core_out) return output class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False): super(MultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.q_net = nn.Linear(d_model, n_head * d_head, bias=False) self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm def forward(self, h, attn_mask=None, mems=None): # multihead attention # [hlen x bsz x n_head x d_head] if mems is not None: c = torch.cat([mems, h], 0) else: c = h if self.pre_lnorm: # layer normalization c = self.layer_norm(c) head_q = self.q_net(h) head_k, head_v = torch.chunk(self.kv_net(c), 2, -1) head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head) head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head) head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head) # [bsz x n_head x qlen x klen] attn_score = torch.einsum('ibnd,jbnd->bnij', head_q, head_k) attn_score.mul_(self.scale) if attn_mask is not None: if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None, None, :, :], 
-float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:, None, :, :], -float('inf')) # [bsz x qlen x klen x n_head] attn_prob = F.softmax(attn_score, dim=3) attn_prob = self.dropatt(attn_prob) # [bsz x n_head x qlen x klen] * [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head] attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, head_v) attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = h + attn_out else: # residual connection + layer normalization output = self.layer_norm(h + attn_out) return output class RelMultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False): super(RelMultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm def _parallelogram_mask(self, h, w, left=False): mask = torch.ones((h, w)).byte() m = min(h, w) mask[:m, :m] = torch.triu(mask[:m, :m]) mask[-m:, -m:] = torch.tril(mask[-m:, -m:]) if left: return mask.bool() else: return mask.flip(0).bool() def _shift(self, x, qlen, klen, mask, left=False): if qlen > 1: zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)), device=x.device, dtype=x.dtype) else: zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype) if left: mask = mask.flip(1) x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1) else: x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1) x = x_padded.masked_select(mask[:, :, None, None]) \ .view(qlen, klen, x.size(2), x.size(3)) return x def _rel_shift(self, x, zero_triu=False): zero_pad = torch.zeros((x.size(0), x.size(1), x.size(2), 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=3) x_padded = x_padded.view(x.size(0), x.size(1), x.size(3) + 1, x.size(2)) x = x_padded.narrow(2, 1, x_padded.size(2) - 1).view_as(x) if zero_triu: ones = torch.ones((x.size(2), x.size(3))) x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] return x def forward(self, w, r, attn_mask=None, mems=None): raise NotImplementedError class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn): def __init__(self, *args, **kwargs): super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs) self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False) def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None): qlen, rlen, bsz = w.size(0), r.size(0), w.size(1) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head w_head_k = w_head_k.view(klen, bsz, self.n_head, 
self.d_head) # klen x bsz x n_head x d_head w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # klen x bsz x n_head x d_head r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head # compute attention score rw_head_q = w_head_q + r_w_bias # qlen x bsz x n_head x d_head AC = torch.einsum('ibnd,jbnd->bnij', rw_head_q, w_head_k) # bsz x n_head x qlen x klen rr_head_q = w_head_q + r_r_bias BD = torch.einsum('ibnd,jnd->bnij', rr_head_q, r_head_k) # bsz x n_head x qlen x klen BD = self._rel_shift(BD) # [bsz x n_head x qlen x klen] attn_score = add_and_scale(AC, BD, self.scale) # compute attention probability if attn_mask is not None: if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None, None, :, :], -float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:, None, :, :], -float('inf')) # [bsz x n_head x qlen x klen] attn_prob = F.softmax(attn_score, dim=3) attn_prob = self.dropatt(attn_prob) # compute attention vector attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, w_head_v) # [qlen x bsz x n_head x d_head] attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = w + attn_out else: # residual connection + layer normalization output = self.layer_norm(w + attn_out) return output class RelLearnableMultiHeadAttn(RelMultiHeadAttn): def __init__(self, *args, **kwargs): super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs) def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None): # r_emb: [klen, n_head, d_head], used for term B # r_w_bias: [n_head, d_head], used for term C # r_bias: [klen, n_head], used for term D qlen, bsz = w.size(0), w.size(1) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) if klen > r_emb.size(0): r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1) r_emb = torch.cat([r_emb_pad, r_emb], 0) r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1) r_bias = torch.cat([r_bias_pad, r_bias], 0) else: r_emb = r_emb[-klen:] r_bias = r_bias[-klen:] r_bias = r_bias.t() # compute attention score rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head AC = torch.einsum('ibnd,jbnd->bnij', rw_head_q, w_head_k) # bsz x n_head x qlen x klen B_ = torch.einsum('ibnd,jnd->bnij', w_head_q, r_emb) # bsz x n_head x qlen x klen D_ = r_bias[None, :, None, :] # 1 x n_head x 1 x klen BD = self._rel_shift(B_ + D_) # [bsz x qlen x klen x n_head] attn_score = add_and_scale(AC, BD, self.scale) # compute attention probability if attn_mask is not None: if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None, None, :, :], -float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:, None, :, :], -float('inf')) # [bsz x n_head x qlen x klen] attn_prob = F.softmax(attn_score, dim=3) attn_prob = self.dropatt(attn_prob) # compute attention vector 
attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, w_head_v) # [qlen x bsz x n_head x d_head] attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = w + attn_out else: # residual connection + layer normalization output = self.layer_norm(w + attn_out) return output class DecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(DecoderLayer, self).__init__() self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class RelLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(RelLearnableDecoderLayer, self).__init__() self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class RelPartialLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(RelPartialLearnableDecoderLayer, self).__init__() self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, r, r_w_bias, r_r_bias, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, r, r_w_bias, r_r_bias, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class AdaptiveEmbedding(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False): super(AdaptiveEmbedding, self).__init__() self.n_token = n_token self.d_embed = d_embed self.cutoffs = cutoffs + [n_token] self.div_val = div_val self.d_proj = d_proj self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val == 1: self.emb_layers.append( nn.Embedding(n_token, d_embed, sparse=(sample_softmax > 0)) ) if d_proj != d_embed: self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed).zero_())) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i)) self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i).zero_())) def forward(self, inp): if self.div_val == 1: embed = self.emb_layers[0](inp) if self.d_proj != self.d_embed: embed = F.linear(embed, self.emb_projs[0]) else: param = next(self.parameters()) inp_flat = inp.view(-1) emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() == 0: continue inp_i = inp_flat.index_select(0, indices_i) - 
l_idx emb_i = self.emb_layers[i](inp_i) emb_i = F.linear(emb_i, self.emb_projs[i]).to(emb_flat.dtype) emb_flat.index_copy_(0, indices_i, emb_i) embed = emb_flat.view(*inp.size(), self.d_proj) embed.mul_(self.emb_scale) return embed class MemTransformerLM(nn.Module): def __init__(self, n_token, n_layer, n_head, d_model, d_head, d_inner, dropout, dropatt, dtype, tie_weight=True, d_embed=None, div_val=1, tie_projs=[False], pre_lnorm=False, tgt_len=None, ext_len=None, mem_len=None, cutoffs=[], adapt_inp=False, same_length=False, attn_type=0, clamp_len=-1, sample_softmax=-1): super(MemTransformerLM, self).__init__() self.n_token = n_token d_embed = d_model if d_embed is None else d_embed self.d_embed = d_embed self.d_model = d_model self.n_head = n_head self.d_head = d_head self.word_emb = AdaptiveEmbedding(n_token, d_embed, d_model, cutoffs, div_val=div_val) self.drop = nn.Dropout(dropout) self.tie_weight = tie_weight self.tie_projs = tie_projs self.div_val = div_val self.n_layer = n_layer self.tgt_len = tgt_len self.mem_len = mem_len self.ext_len = ext_len self.max_klen = tgt_len + ext_len + mem_len self.attn_type = attn_type self.layers = nn.ModuleList() # the default attention if attn_type == 0: for i in range(n_layer): self.layers.append( RelPartialLearnableDecoderLayer( n_head, d_model, d_head, d_inner, dropout, tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len, dropatt=dropatt, pre_lnorm=pre_lnorm) ) # learnable embeddings elif attn_type == 1: for i in range(n_layer): self.layers.append( RelLearnableDecoderLayer( n_head, d_model, d_head, d_inner, dropout, tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len, dropatt=dropatt, pre_lnorm=pre_lnorm) ) # absolute embeddings elif attn_type in [2, 3]: for i in range(n_layer): self.layers.append( DecoderLayer( n_head, d_model, d_head, d_inner, dropout, dropatt=dropatt, pre_lnorm=pre_lnorm) ) self.sample_softmax = sample_softmax # use sampled softmax if sample_softmax > 0: self.out_layer = nn.Linear(d_model, n_token) self.tie_weight = tie_weight self.sampler = LogUniformSampler(n_token, sample_softmax) # use adaptive softmax (including standard softmax) else: if tie_weight: emb_layers = [i.weight for i in self.word_emb.emb_layers] else: emb_layers = None emb_projs = self.word_emb.emb_projs self.crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_model, cutoffs, div_val=div_val, tie_projs=tie_projs, out_projs=emb_projs, out_layers_weights=emb_layers) self.same_length = same_length self.clamp_len = clamp_len self._create_params() def backward_compatible(self): self.sample_softmax = -1 def _create_params(self): # default attention if self.attn_type == 0: self.pos_emb = PositionalEmbedding(self.d_model) self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head).zero_()) self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head).zero_()) # learnable elif self.attn_type == 1: self.r_emb = nn.Parameter(torch.Tensor( self.n_layer, self.max_klen, self.n_head, self.d_head).zero_()) self.r_w_bias = nn.Parameter(torch.Tensor( self.n_layer, self.n_head, self.d_head).zero_()) self.r_bias = nn.Parameter(torch.Tensor( self.n_layer, self.max_klen, self.n_head).zero_()) # absolute standard elif self.attn_type == 2: self.pos_emb = PositionalEmbedding(self.d_model) # absolute deeper SA elif self.attn_type == 3: self.r_emb = nn.Parameter(torch.Tensor( self.n_layer, self.max_klen, self.d_model).zero_()) def reset_length(self, tgt_len, ext_len, mem_len): if tgt_len < 1: raise RuntimeError(f'tgt_len should be >= 1, but got {tgt_len}') if ext_len < 0: 
raise RuntimeError(f'ext_len should be >= 0, but got {ext_len}') if mem_len < 0: raise RuntimeError(f'mem_len should be >= 0, but got {mem_len}') self.tgt_len = tgt_len self.mem_len = mem_len self.ext_len = ext_len def init_mems(self): if self.mem_len > 0: param = next(self.parameters()) mems = torch.empty(self.n_layer, 0, dtype=param.dtype, device=param.device) return mems else: return None def _update_mems(self, hids, mems, qlen, mlen): # does not deal with None if mems is None: return None # mems is not None assert len(hids) == len(mems), 'len(hids) != len(mems)' # There are `mlen + qlen` steps that can be cached into mems # For the next step, the last `ext_len` of the `qlen` tokens # will be used as the extended context. Hence, we only cache # the tokens from `mlen + qlen - self.ext_len - self.mem_len` # to `mlen + qlen - self.ext_len`. with torch.no_grad(): stacked = torch.stack(hids) if ( self.mem_len == self.tgt_len and self.ext_len == 0 and stacked.size(1) == self.mem_len ): new_mems = stacked.detach() else: end_idx = mlen + max(0, qlen - self.ext_len) beg_idx = max(0, end_idx - self.mem_len) if mems.numel(): cat = torch.cat([mems, stacked], dim=1) else: cat = stacked new_mems = cat[:, beg_idx:end_idx].detach() return new_mems def _forward(self, dec_inp, mems=None): qlen, bsz = dec_inp.size() word_emb = self.word_emb(dec_inp) mlen = mems[0].size(0) if mems is not None else 0 klen = mlen + qlen if self.same_length: all_ones = word_emb.new_ones(qlen, klen) mask_len = klen - self.mem_len - 1 if mask_len > 0: mask_shift_len = qlen - mask_len else: mask_shift_len = qlen dec_attn_mask = (torch.triu(all_ones, 1+mlen) + torch.tril(all_ones, -mask_shift_len)).bool() else: dec_attn_mask = torch.triu( word_emb.new_ones(qlen, klen), diagonal=1+mlen).bool() hids = [] # default if self.attn_type == 0: pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype) if self.clamp_len > 0: pos_seq.clamp_(max=self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb) pos_emb = self.drop(pos_emb) for i, layer in enumerate(self.layers): hids.append(core_out.detach()) mems_i = None if mems is None else mems[i] core_out = layer(core_out, pos_emb, self.r_w_bias, self.r_r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i) # learnable elif self.attn_type == 1: core_out = self.drop(word_emb) for i, layer in enumerate(self.layers): hids.append(core_out.detach()) if self.clamp_len > 0: r_emb = self.r_emb[i][-self.clamp_len:] r_bias = self.r_bias[i][-self.clamp_len:] else: r_emb, r_bias = self.r_emb[i], self.r_bias[i] mems_i = None if mems is None else mems[i] core_out = layer(core_out, r_emb, self.r_w_bias[i], r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i) # absolute elif self.attn_type == 2: pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype) if self.clamp_len > 0: pos_seq.clamp_(max=self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb + pos_emb[-qlen:]) for i, layer in enumerate(self.layers): hids.append(core_out.detach()) mems_i = None if mems is None else mems[i] if mems_i is not None and len(mems_i) and i == 0: mems_i += pos_emb[:mlen] core_out = layer(core_out, dec_attn_mask=dec_attn_mask, mems=mems_i) elif self.attn_type == 3: core_out = self.drop(word_emb) for i, layer in enumerate(self.layers): hids.append(core_out.detach()) mems_i = None if mems is None else mems[i] if mems_i is not None and len(mems_i) and mlen > 0: cur_emb = self.r_emb[i][:-qlen] cur_size = cur_emb.size(0) if cur_size < mlen: 
cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1) cur_emb = torch.cat([cur_emb_pad, cur_emb], 0) else: cur_emb = cur_emb[-mlen:] mems_i += cur_emb.view(mlen, 1, -1) core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1) core_out = layer(core_out, dec_attn_mask=dec_attn_mask, mems=mems_i) core_out = self.drop(core_out) new_mems = self._update_mems(hids, mems, qlen, mlen) return core_out, new_mems def forward(self, data, target, mems): # nn.DataParallel does not allow size(0) tensors to be broadcasted. # So, have to initialize size(0) mems inside the model forward. # Moreover, have to return new_mems to allow nn.DataParallel to piece # them together. if mems is None: mems = self.init_mems() tgt_len = target.size(0) hidden, new_mems = self._forward(data, mems=mems) pred_hid = hidden[-tgt_len:] if self.sample_softmax > 0 and self.training: assert self.tie_weight logit = sample_logits(self.word_emb, self.out_layer.bias, target, pred_hid, self.sampler) loss = -F.log_softmax(logit, -1)[:, :, 0] else: loss = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target.view(-1)) loss = loss.view(tgt_len, -1) return (loss, new_mems) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='unit test') parser.add_argument('--n_layer', type=int, default=4, help='') parser.add_argument('--n_rel_layer', type=int, default=4, help='') parser.add_argument('--n_head', type=int, default=2, help='') parser.add_argument('--d_head', type=int, default=2, help='') parser.add_argument('--d_model', type=int, default=200, help='') parser.add_argument('--d_embed', type=int, default=200, help='') parser.add_argument('--d_inner', type=int, default=200, help='') parser.add_argument('--dropout', type=float, default=0.0, help='') parser.add_argument('--cuda', action='store_true', help='') parser.add_argument('--seed', type=int, default=1111, help='') parser.add_argument('--multi_gpu', action='store_true', help='') args = parser.parse_args() device = torch.device("cuda" if args.cuda else "cpu") B = 4 tgt_len, mem_len, ext_len = 36, 36, 0 data_len = tgt_len * 20 args.n_token = 10000 import data_utils data = torch.LongTensor(data_len*B).random_(0, args.n_token).to(device) diter = data_utils.LMOrderedIterator(data, B, tgt_len, device=device, ext_len=ext_len) cutoffs = [args.n_token // 2] tie_projs = [False] + [True] * len(cutoffs) for div_val in [1, 2]: for d_embed in [200, 100]: model = MemTransformerLM(args.n_token, args.n_layer, args.n_head, args.d_model, args.d_head, args.d_inner, args.dropout, dropatt=args.dropout, tie_weight=True, d_embed=d_embed, div_val=div_val, tie_projs=tie_projs, pre_lnorm=True, tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len, cutoffs=cutoffs, attn_type=0, dtype=None).to(device) print(sum(p.numel() for p in model.parameters())) mems = None for idx, (inp, tgt, seqlen, _) in enumerate(diter): print('batch {}'.format(idx)) _, mems = model(inp, tgt, mems)
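Both relative-attention variants above rely on _rel_shift to realign the position term: a zero column is prepended, the last two axes are viewed swapped, and the first row is dropped, which shifts each row of the score matrix so that columns index relative rather than absolute positions. A standalone copy of those steps, for illustration only, run on a tiny tensor:

import torch

def rel_shift(x):
    # x: [bsz, n_head, qlen, klen]; same steps as RelMultiHeadAttn._rel_shift above.
    zero_pad = torch.zeros((x.size(0), x.size(1), x.size(2), 1),
                           device=x.device, dtype=x.dtype)
    x_padded = torch.cat([zero_pad, x], dim=3)
    x_padded = x_padded.view(x.size(0), x.size(1), x.size(3) + 1, x.size(2))
    return x_padded.narrow(2, 1, x_padded.size(2) - 1).view_as(x)

# qlen=2 queries against klen=4 key positions.
scores = torch.arange(2 * 4, dtype=torch.float).view(1, 1, 2, 4)
print(scores[0, 0])             # rows [0, 1, 2, 3] and [4, 5, 6, 7]
print(rel_shift(scores)[0, 0])  # rows [1, 2, 3, 0] and [4, 5, 6, 7]: row 0 shifted left by 1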
PyTorch/Recommendation/DLRM/tests/feature_specs
feature_specs
wider_dtypes
channel_spec: categorical: - cat_0.bin - cat_1.bin - cat_2.bin - cat_3.bin - cat_4.bin - cat_5.bin - cat_6.bin - cat_7.bin - cat_8.bin - cat_9.bin - cat_10.bin - cat_11.bin - cat_12.bin - cat_13.bin - cat_14.bin - cat_15.bin - cat_16.bin - cat_17.bin - cat_18.bin - cat_19.bin - cat_20.bin - cat_21.bin - cat_22.bin - cat_23.bin - cat_24.bin - cat_25.bin label: - label numerical: &id001 - num_0 - num_1 - num_2 - num_3 - num_4 - num_5 - num_6 - num_7 - num_8 - num_9 - num_10 - num_11 - num_12 feature_spec: cat_0.bin: cardinality: 100000 dtype: int64 cat_1.bin: cardinality: 100000 dtype: int32 cat_10.bin: cardinality: 100000 dtype: int32 cat_11.bin: cardinality: 100000 dtype: int32 cat_12.bin: cardinality: 100000 dtype: int32 cat_13.bin: cardinality: 100000 dtype: int32 cat_14.bin: cardinality: 100000 dtype: int32 cat_15.bin: cardinality: 100000 dtype: int64 cat_16.bin: cardinality: 100000 dtype: int32 cat_17.bin: cardinality: 100000 dtype: int32 cat_18.bin: cardinality: 100000 dtype: int32 cat_19.bin: cardinality: 100000 dtype: int32 cat_2.bin: cardinality: 100000 dtype: int32 cat_20.bin: cardinality: 100000 dtype: int64 cat_21.bin: cardinality: 100000 dtype: int32 cat_22.bin: cardinality: 100000 dtype: int32 cat_23.bin: cardinality: 100000 dtype: int32 cat_24.bin: cardinality: 100000 dtype: int32 cat_25.bin: cardinality: 100000 dtype: int32 cat_3.bin: cardinality: 100000 dtype: int64 cat_4.bin: cardinality: 100000 dtype: int32 cat_5.bin: cardinality: 100000 dtype: int32 cat_6.bin: cardinality: 100000 dtype: int32 cat_7.bin: cardinality: 100000 dtype: int64 cat_8.bin: cardinality: 100000 dtype: int32 cat_9.bin: cardinality: 100000 dtype: int64 label: dtype: bool num_0: dtype: float16 num_1: dtype: float16 num_10: dtype: float16 num_11: dtype: float16 num_12: dtype: float16 num_2: dtype: float16 num_3: dtype: float16 num_4: dtype: float16 num_5: dtype: float16 num_6: dtype: float16 num_7: dtype: float16 num_8: dtype: float16 num_9: dtype: float16 metadata: {} source_spec: test: - features: *id001 files: - test/numerical.bin type: split_binary - features: - label files: - test/label.bin type: split_binary - features: - cat_0.bin files: - test/cat_0.bin type: split_binary - features: - cat_1.bin files: - test/cat_1.bin type: split_binary - features: - cat_2.bin files: - test/cat_2.bin type: split_binary - features: - cat_3.bin files: - test/cat_3.bin type: split_binary - features: - cat_4.bin files: - test/cat_4.bin type: split_binary - features: - cat_5.bin files: - test/cat_5.bin type: split_binary - features: - cat_6.bin files: - test/cat_6.bin type: split_binary - features: - cat_7.bin files: - test/cat_7.bin type: split_binary - features: - cat_8.bin files: - test/cat_8.bin type: split_binary - features: - cat_9.bin files: - test/cat_9.bin type: split_binary - features: - cat_10.bin files: - test/cat_10.bin type: split_binary - features: - cat_11.bin files: - test/cat_11.bin type: split_binary - features: - cat_12.bin files: - test/cat_12.bin type: split_binary - features: - cat_13.bin files: - test/cat_13.bin type: split_binary - features: - cat_14.bin files: - test/cat_14.bin type: split_binary - features: - cat_15.bin files: - test/cat_15.bin type: split_binary - features: - cat_16.bin files: - test/cat_16.bin type: split_binary - features: - cat_17.bin files: - test/cat_17.bin type: split_binary - features: - cat_18.bin files: - test/cat_18.bin type: split_binary - features: - cat_19.bin files: - test/cat_19.bin type: split_binary - features: - cat_20.bin files: - test/cat_20.bin type: 
split_binary - features: - cat_21.bin files: - test/cat_21.bin type: split_binary - features: - cat_22.bin files: - test/cat_22.bin type: split_binary - features: - cat_23.bin files: - test/cat_23.bin type: split_binary - features: - cat_24.bin files: - test/cat_24.bin type: split_binary - features: - cat_25.bin files: - test/cat_25.bin type: split_binary train: - features: *id001 files: - train/numerical.bin type: split_binary - features: - label files: - train/label.bin type: split_binary - features: - cat_0.bin files: - train/cat_0.bin type: split_binary - features: - cat_1.bin files: - train/cat_1.bin type: split_binary - features: - cat_2.bin files: - train/cat_2.bin type: split_binary - features: - cat_3.bin files: - train/cat_3.bin type: split_binary - features: - cat_4.bin files: - train/cat_4.bin type: split_binary - features: - cat_5.bin files: - train/cat_5.bin type: split_binary - features: - cat_6.bin files: - train/cat_6.bin type: split_binary - features: - cat_7.bin files: - train/cat_7.bin type: split_binary - features: - cat_8.bin files: - train/cat_8.bin type: split_binary - features: - cat_9.bin files: - train/cat_9.bin type: split_binary - features: - cat_10.bin files: - train/cat_10.bin type: split_binary - features: - cat_11.bin files: - train/cat_11.bin type: split_binary - features: - cat_12.bin files: - train/cat_12.bin type: split_binary - features: - cat_13.bin files: - train/cat_13.bin type: split_binary - features: - cat_14.bin files: - train/cat_14.bin type: split_binary - features: - cat_15.bin files: - train/cat_15.bin type: split_binary - features: - cat_16.bin files: - train/cat_16.bin type: split_binary - features: - cat_17.bin files: - train/cat_17.bin type: split_binary - features: - cat_18.bin files: - train/cat_18.bin type: split_binary - features: - cat_19.bin files: - train/cat_19.bin type: split_binary - features: - cat_20.bin files: - train/cat_20.bin type: split_binary - features: - cat_21.bin files: - train/cat_21.bin type: split_binary - features: - cat_22.bin files: - train/cat_22.bin type: split_binary - features: - cat_23.bin files: - train/cat_23.bin type: split_binary - features: - cat_24.bin files: - train/cat_24.bin type: split_binary - features: - cat_25.bin files: - train/cat_25.bin type: split_binary
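In this spec every categorical feature lives in its own split-binary file with a per-feature dtype (mostly int32, a handful int64, all with cardinality 100000), the 13 float16 numerical features share one numerical.bin per subset, and the bool label has its own file. A minimal sketch of reading the spec back with PyYAML; the on-disk file name used below is an assumption:

import yaml  # PyYAML

with open("feature_spec.yaml") as f:  # assumed file name
    spec = yaml.safe_load(f)          # YAML anchors/aliases (&id001 / *id001) resolve automatically

categorical = spec["channel_spec"]["categorical"]
numerical = spec["channel_spec"]["numerical"]
print(f"{len(categorical)} categorical, {len(numerical)} numerical features")

for name in categorical:
    meta = spec["feature_spec"][name]
    print(name, meta["dtype"], meta["cardinality"])

# Which files hold the test split of each channel:
for chunk in spec["source_spec"]["test"]:
    print(chunk["type"], chunk["features"], chunk["files"])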
TensorFlow2/Segmentation/Contrib/UNet3P
UNet3P
benchmark_inference
""" Script to benchmark model throughput and latency """ import os import numpy as np from tqdm import tqdm from timeit import default_timer as timer import hydra from omegaconf import DictConfig import tensorflow as tf from tensorflow.keras import mixed_precision from data_generators import tf_data_generator from utils.general_utils import join_paths, suppress_warnings from utils.images_utils import postprocess_mask from models.model import prepare_model def benchmark_time(cfg: DictConfig): """ Output throughput and latency """ # suppress TensorFlow and DALI warnings suppress_warnings() if cfg.OPTIMIZATION.AMP: print("Enabling Automatic Mixed Precision(AMP)") policy = mixed_precision.Policy('mixed_float16') mixed_precision.set_global_policy(policy) if cfg.OPTIMIZATION.XLA: print("Enabling Accelerated Linear Algebra(XLA)") tf.config.optimizer.set_jit(True) # data generator val_generator = tf_data_generator.DataGenerator(cfg, mode="VAL") validation_steps = val_generator.__len__() warmup_steps, bench_steps = 50, 100 if "warmup_steps" in cfg.keys(): warmup_steps = cfg.warmup_steps if "bench_steps" in cfg.keys(): bench_steps = cfg.bench_steps validation_steps = min(validation_steps, (warmup_steps + bench_steps)) progress_bar = tqdm(total=validation_steps) # create model model = prepare_model(cfg) # weights model path checkpoint_path = join_paths( cfg.WORK_DIR, cfg.CALLBACKS.MODEL_CHECKPOINT.PATH, f"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5" ) assert os.path.exists(checkpoint_path), \ f"Model weight's file does not exist at \n{checkpoint_path}" # load model weights model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True) # model.summary() time_taken = [] # for each batch for i, (batch_images, batch_mask) in enumerate(val_generator): start_time = timer() # make prediction on batch batch_predictions = model.predict_on_batch(batch_images) if len(model.outputs) > 1: batch_predictions = batch_predictions[0] # do postprocessing on predicted mask batch_predictions = postprocess_mask(batch_predictions, cfg.OUTPUT.CLASSES) time_taken.append(timer() - start_time) progress_bar.update(1) if i >= validation_steps: break progress_bar.close() mean_time = np.mean(time_taken[warmup_steps:]) # skipping warmup_steps throughput = (cfg.HYPER_PARAMETERS.BATCH_SIZE / mean_time) print(f"Latency: {round(mean_time * 1e3, 2)} msec") print(f"Throughput/FPS: {round(throughput, 2)} samples/sec") @hydra.main(version_base=None, config_path="configs", config_name="config") def main(cfg: DictConfig): """ Read config file and pass to benchmark_time method """ benchmark_time(cfg) if __name__ == "__main__": main()
TensorFlow2/Segmentation/UNet_Medical
UNet_Medical
download_dataset
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os PARSER = argparse.ArgumentParser(description="U-Net medical") PARSER.add_argument('--data_dir', type=str, default='./data', help="""Directory where to download the dataset""") def main(): FLAGS = PARSER.parse_args() if not os.path.exists(FLAGS.data_dir): os.makedirs(FLAGS.data_dir) os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/train-volume.tif -P {}'.format(FLAGS.data_dir)) os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/train-labels.tif -P {}'.format(FLAGS.data_dir)) os.system('wget http://brainiac2.mit.edu/isbi_challenge/sites/default/files/test-volume.tif -P {}'.format(FLAGS.data_dir)) print("Finished downloading files for U-Net medical to {}".format(FLAGS.data_dir)) if __name__ == '__main__': main()
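The script shells out to wget for the three ISBI 2012 challenge files. A sketch of the same downloads using only the Python standard library, with the URLs taken verbatim from the script above and no claim that the host still serves them:

import os
import urllib.request

data_dir = "./data"
os.makedirs(data_dir, exist_ok=True)

base_url = "http://brainiac2.mit.edu/isbi_challenge/sites/default/files"
for fname in ("train-volume.tif", "train-labels.tif", "test-volume.tif"):
    # Download each TIFF next to where the wget calls above would place it.
    urllib.request.urlretrieve(f"{base_url}/{fname}", os.path.join(data_dir, fname))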
TensorFlow2/Detection/Efficientdet/object_detection
object_detection
matcher
# Copyright 2020 Google Research. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Matcher interface and Match class. This module defines the Matcher interface and the Match object. The job of the matcher is to match row and column indices based on the similarity matrix and other optional parameters. Each column is matched to at most one row. There are three possibilities for the matching: 1) match: A column matches a row. 2) no_match: A column does not match any row. 3) ignore: A column that is neither 'match' nor no_match. The ignore case is regularly encountered in object detection: when an anchor has a relatively small overlap with a ground-truth box, one neither wants to consider this box a positive example (match) nor a negative example (no match). The Match class is used to store the match results and it provides simple apis to query the results. """ import abc import tensorflow.compat.v1 as tf class Match(object): """Class to store results from the matcher. This class is used to store the results from the matcher. It provides convenient methods to query the matching results. """ def __init__(self, match_results): """Constructs a Match object. Args: match_results: Integer tensor of shape [N] with (1) match_results[i]>=0, meaning that column i is matched with row match_results[i]. (2) match_results[i]=-1, meaning that column i is not matched. (3) match_results[i]=-2, meaning that column i is ignored. Raises: ValueError: if match_results does not have rank 1 or is not an integer int32 scalar tensor """ if match_results.shape.ndims != 1: raise ValueError('match_results should have rank 1') if match_results.dtype != tf.int32: raise ValueError('match_results should be an int32 or int64 scalar ' 'tensor') self._match_results = match_results @property def match_results(self): """The accessor for match results. Returns: the tensor which encodes the match results. """ return self._match_results def matched_column_indices(self): """Returns column indices that match to some row. The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1))) def matched_column_indicator(self): """Returns column indices that are matched. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return tf.greater_equal(self._match_results, 0) def num_matched_columns(self): """Returns number (int32 scalar tensor) of matched columns.""" return tf.shape(self.matched_column_indices())[0] def unmatched_column_indices(self): """Returns column indices that do not match any row. The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. 
""" return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1))) def unmatched_column_indicator(self): """Returns column indices that are unmatched. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return tf.equal(self._match_results, -1) def num_unmatched_columns(self): """Returns number (int32 scalar tensor) of unmatched columns.""" return tf.shape(self.unmatched_column_indices())[0] def ignored_column_indices(self): """Returns column indices that are ignored (neither Matched nor Unmatched). The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(self.ignored_column_indicator())) def ignored_column_indicator(self): """Returns boolean column indicator where True means the column is ignored. Returns: column_indicator: boolean vector which is True for all ignored column indices. """ return tf.equal(self._match_results, -2) def num_ignored_columns(self): """Returns number (int32 scalar tensor) of matched columns.""" return tf.shape(self.ignored_column_indices())[0] def unmatched_or_ignored_column_indices(self): """Returns column indices that are unmatched or ignored. The indices returned by this op are always sorted in increasing order. Returns: column_indices: int32 tensor of shape [K] with column indices. """ return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results))) def matched_row_indices(self): """Returns row indices that match some column. The indices returned by this op are ordered so as to be in correspondence with the output of matched_column_indicator(). For example if self.matched_column_indicator() is [0,2], and self.matched_row_indices() is [7, 3], then we know that column 0 was matched to row 7 and column 2 was matched to row 3. Returns: row_indices: int32 tensor of shape [K] with row indices. """ return self._reshape_and_cast( tf.gather(self._match_results, self.matched_column_indices())) def _reshape_and_cast(self, t): return tf.cast(tf.reshape(t, [-1]), tf.int32) def gather_based_on_match(self, input_tensor, unmatched_value, ignored_value): """Gathers elements from `input_tensor` based on match results. For columns that are matched to a row, gathered_tensor[col] is set to input_tensor[match_results[col]]. For columns that are unmatched, gathered_tensor[col] is set to unmatched_value. Finally, for columns that are ignored gathered_tensor[col] is set to ignored_value. Note that the input_tensor.shape[1:] must match with unmatched_value.shape and ignored_value.shape Args: input_tensor: Tensor to gather values from. unmatched_value: Constant tensor value for unmatched columns. ignored_value: Constant tensor value for ignored columns. Returns: gathered_tensor: A tensor containing values gathered from input_tensor. The shape of the gathered tensor is [match_results.shape[0]] + input_tensor.shape[1:]. """ input_tensor = tf.concat([tf.stack([ignored_value, unmatched_value]), input_tensor], axis=0) gather_indices = tf.maximum(self.match_results + 2, 0) gathered_tensor = tf.gather(input_tensor, gather_indices) return gathered_tensor class Matcher(object): """Abstract base class for matcher. """ __metaclass__ = abc.ABCMeta def match(self, similarity_matrix, scope=None, **params): """Computes matches among row and column indices and returns the result. Computes matches among the row and column indices based on the similarity matrix and optional arguments. 
Args: similarity_matrix: Float tensor of shape [N, M] with pairwise similarity where higher value means more similar. scope: Op scope name. Defaults to 'Match' if None. **params: Additional keyword arguments for specific implementations of the Matcher. Returns: A Match object with the results of matching. """ with tf.name_scope(scope, 'Match', [similarity_matrix, params]) as scope: return Match(self._match(similarity_matrix, **params)) @abc.abstractmethod def _match(self, similarity_matrix, **params): """Method to be overridden by implementations. Args: similarity_matrix: Float tensor of shape [N, M] with pairwise similarity where higher value means more similar. **params: Additional keyword arguments for specific implementations of the Matcher. Returns: match_results: Integer tensor of shape [M]: match_results[i]>=0 means that column i is matched to row match_results[i], match_results[i]=-1 means that the column is not matched. match_results[i]=-2 means that the column is ignored (usually this happens when there is a very weak match which one neither wants as positive nor negative example). """ pass
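The Match wrapper is easiest to see with a hand-built match_results vector rather than a full Matcher subclass; the compat.v1 ops it uses run eagerly under TF2. A small sketch (the import path is an assumption):

import tensorflow.compat.v1 as tf
from object_detection.matcher import Match  # import path is an assumption

# Column 0 -> row 1, column 1 unmatched, column 2 -> row 0, column 3 ignored.
m = Match(tf.constant([1, -1, 0, -2], dtype=tf.int32))

print(m.matched_column_indices())    # [0, 2]
print(m.matched_row_indices())       # [1, 0]
print(m.unmatched_column_indices())  # [1]
print(m.num_ignored_columns())       # 1

# Gather per-column targets: row values for matches, fill values otherwise.
rows = tf.constant([[10.0], [20.0]])
print(m.gather_based_on_match(rows,
                              unmatched_value=tf.constant([0.0]),
                              ignored_value=tf.constant([-1.0])))
# -> [[20.], [0.], [10.], [-1.]]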
TensorFlow/Detection/SSD/models/research/slim/nets
nets
pix2pix_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= """Tests for pix2pix.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import tensorflow as tf from nets import pix2pix class GeneratorTest(tf.test.TestCase): def _reduced_default_blocks(self): """Returns the default blocks, scaled down to make test run faster.""" return [pix2pix.Block(b.num_filters // 32, b.decoder_keep_prob) for b in pix2pix._default_generator_blocks()] def test_output_size_nn_upsample_conv(self): batch_size = 2 height, width = 256, 256 num_outputs = 4 images = tf.ones((batch_size, height, width, 3)) with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()): logits, _ = pix2pix.pix2pix_generator( images, num_outputs, blocks=self._reduced_default_blocks(), upsample_method='nn_upsample_conv') with self.test_session() as session: session.run(tf.global_variables_initializer()) np_outputs = session.run(logits) self.assertListEqual([batch_size, height, width, num_outputs], list(np_outputs.shape)) def test_output_size_conv2d_transpose(self): batch_size = 2 height, width = 256, 256 num_outputs = 4 images = tf.ones((batch_size, height, width, 3)) with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()): logits, _ = pix2pix.pix2pix_generator( images, num_outputs, blocks=self._reduced_default_blocks(), upsample_method='conv2d_transpose') with self.test_session() as session: session.run(tf.global_variables_initializer()) np_outputs = session.run(logits) self.assertListEqual([batch_size, height, width, num_outputs], list(np_outputs.shape)) def test_block_number_dictates_number_of_layers(self): batch_size = 2 height, width = 256, 256 num_outputs = 4 images = tf.ones((batch_size, height, width, 3)) blocks = [ pix2pix.Block(64, 0.5), pix2pix.Block(128, 0), ] with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()): _, end_points = pix2pix.pix2pix_generator( images, num_outputs, blocks) num_encoder_layers = 0 num_decoder_layers = 0 for end_point in end_points: if end_point.startswith('encoder'): num_encoder_layers += 1 elif end_point.startswith('decoder'): num_decoder_layers += 1 self.assertEqual(num_encoder_layers, len(blocks)) self.assertEqual(num_decoder_layers, len(blocks)) class DiscriminatorTest(tf.test.TestCase): def _layer_output_size(self, input_size, kernel_size=4, stride=2, pad=2): return (input_size + pad * 2 - kernel_size) // stride + 1 def test_four_layers(self): batch_size = 2 input_size = 256 output_size = self._layer_output_size(input_size) output_size = self._layer_output_size(output_size) output_size = self._layer_output_size(output_size) output_size = self._layer_output_size(output_size, stride=1) output_size = self._layer_output_size(output_size, stride=1) images = tf.ones((batch_size, input_size, input_size, 3)) with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()): logits, end_points = 
pix2pix.pix2pix_discriminator( images, num_filters=[64, 128, 256, 512]) self.assertListEqual([batch_size, output_size, output_size, 1], logits.shape.as_list()) self.assertListEqual([batch_size, output_size, output_size, 1], end_points['predictions'].shape.as_list()) def test_four_layers_no_padding(self): batch_size = 2 input_size = 256 output_size = self._layer_output_size(input_size, pad=0) output_size = self._layer_output_size(output_size, pad=0) output_size = self._layer_output_size(output_size, pad=0) output_size = self._layer_output_size(output_size, stride=1, pad=0) output_size = self._layer_output_size(output_size, stride=1, pad=0) images = tf.ones((batch_size, input_size, input_size, 3)) with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()): logits, end_points = pix2pix.pix2pix_discriminator( images, num_filters=[64, 128, 256, 512], padding=0) self.assertListEqual([batch_size, output_size, output_size, 1], logits.shape.as_list()) self.assertListEqual([batch_size, output_size, output_size, 1], end_points['predictions'].shape.as_list()) def test_four_layers_wrog_paddig(self): batch_size = 2 input_size = 256 images = tf.ones((batch_size, input_size, input_size, 3)) with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()): with self.assertRaises(TypeError): pix2pix.pix2pix_discriminator( images, num_filters=[64, 128, 256, 512], padding=1.5) def test_four_layers_negative_padding(self): batch_size = 2 input_size = 256 images = tf.ones((batch_size, input_size, input_size, 3)) with tf.contrib.framework.arg_scope(pix2pix.pix2pix_arg_scope()): with self.assertRaises(ValueError): pix2pix.pix2pix_discriminator( images, num_filters=[64, 128, 256, 512], padding=-1) if __name__ == '__main__': tf.test.main()
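The expected discriminator output size in test_four_layers is plain convolution arithmetic, (input + 2*pad - kernel) // stride + 1 applied five times: for a 256x256 input with kernel 4 and pad 2 it gives 129, 65, 33, then 34 and 35 for the two stride-1 layers, hence the asserted [batch, 35, 35, 1] logits. As a quick check:

def layer_output_size(n, kernel_size=4, stride=2, pad=2):
    # Same formula as _layer_output_size in the test above.
    return (n + 2 * pad - kernel_size) // stride + 1

size = 256
for stride in (2, 2, 2, 1, 1):
    size = layer_output_size(size, stride=stride)
    print(size)  # 129, 65, 33, 34, 35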
PyTorch/SpeechRecognition/wav2vec2/scripts
scripts
inference
#!/usr/bin/env bash # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -e : ${DATASET_DIR:="/datasets/LibriSpeech"} : ${VALID_SUBSET:="test-other"} : ${OUTPUT_DIR:="results/inference"} : ${NUM_GPUS:=1} : ${BATCH_SIZE:=8} : ${AMP:=false} : ${BF16:=false} : ${FP16:=false} : ${EMA:=0.0} : ${SEED:=1} : ${FINETUNED_MODEL:=results/finetune_base_960h/wav2vec2_update320000.pt} : ${MASK_PROB:=0.5} : ${MASK_CHANNEL_PROB:=0.25} : ${DISTRIBUTED:="-m torch.distributed.launch --nproc_per_node=$NUM_GPUS"} # inference : ${MAX_DURATION:=""} : ${NUM_STEPS:=0} : ${NUM_WARMUP_STEPS:=0} : ${CPU:=false} : ${LOGITS_FILE:=} : ${PREDICTION_FILE:="${OUTPUT_DIR}/${DATASET}.predictions"} : ${TORCHSCRIPT:=false} : ${TORCHSCRIPT_SAVE:=false} : ${LOG_FILE:=$OUTPUT_DIR/nvlog.json} mkdir -p "$OUTPUT_DIR" ARGS+=" --w2v_path $FINETUNED_MODEL" ARGS+=" --data $DATASET_DIR" ARGS+=" --valid_subset $VALID_SUBSET" ARGS+=" --output_dir $OUTPUT_DIR" ARGS+=" --ema $EMA" ARGS+=" --seed $SEED" ARGS+=" --skip_invalid_size_inputs_valid_test" ARGS+=" --apply_mask" ARGS+=" --mask_prob $MASK_PROB" ARGS+=" --mask_channel_prob $MASK_CHANNEL_PROB" ARGS+=" --mask_channel_length 64" ARGS+=" --encoder_layerdrop 0.1" # NOTE This is called `layerdrop` in fairseq finetuning yamls ARGS+=" --activation_dropout 0.1" ARGS+=" --feature_grad_mult 0.0" ARGS+=" --batch_size=$BATCH_SIZE" ARGS+=" --steps $NUM_STEPS" ARGS+=" --warmup_steps $NUM_WARMUP_STEPS" [ "$AMP" = true ] && ARGS+=" --amp --fp16" [ "$BF16" = true ] && ARGS+=" --bf16" [ "$TORCHSCRIPT" = true ] && ARGS+=" --torchscript" [ "$TORCHSCRIPT_SAVE" = true ] && ARGS+=" --torchscript_export" [ -n "$LOG_FILE" ] && ARGS+=" --log_file $LOG_FILE" [ "$CPU" == "true" ] && ARGS+=" --cpu" [ -n "$MAX_DURATION" ] && ARGS+=" --max_duration ${MAX_DURATION}" set -x if [ $NUM_GPUS -gt 1 ]; then python3 -m torch.distributed.launch --nproc_per_node=$NUM_GPUS inference.py $ARGS $@ else python3 inference.py $ARGS $@ fi
Tools/PyTorch/TimeSeriesPredictionPlatform/examples
examples
seed_sweep
/# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. python launch_training.py \ -m \ seed='range(1,9)' \ model=tft \ dataset=electricity \ trainer/criterion=quantile \ trainer.config.num_epochs=3 \ hydra/callbacks=[merge_logs] \ hydra/launcher=joblib \ hydra.launcher.n_jobs=8
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

python launch_training.py \
    -m \
    seed='range(1,9)' \
    model=tft \
    dataset=electricity \
    trainer/criterion=quantile \
    trainer.config.num_epochs=3 \
    hydra/callbacks=[merge_logs] \
    hydra/launcher=joblib \
    hydra.launcher.n_jobs=8
PyTorch/SpeechRecognition/Jasper/triton/model_repo_configs/fp16/jasper-onnx
jasper-onnx
config
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of NVIDIA CORPORATION nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. name: "jasper-onnx" platform: "onnxruntime_onnx" default_model_filename: "model.onnx" max_batch_size: 8#MAX_BATCH input [ { name: "input__0" data_type: TYPE_FP16 dims: [64, -1] } ] output [ { name: "output__0" data_type: TYPE_FP16 dims: [-1, 29 ] } ] instance_group { count: 1#NUM_ENGINES gpus: 0 kind: KIND_GPU } #db#dynamic_batching { #db# preferred_batch_size: 8#MAX_BATCH #db# max_queue_delay_microseconds: #MAX_QUEUE #db#}
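Given this config, a request carries one tensor named input__0 of FP16 acoustic features shaped [batch, 64, time] (Triton prepends the batch dimension, at most 8 here) and returns output__0 logits shaped [batch, time', 29]. A hedged client sketch, assuming the tritonclient Python package and a Triton server listening on localhost:8000:

import numpy as np
import tritonclient.http as triton_http

client = triton_http.InferenceServerClient(url="localhost:8000")

# One utterance: 64 features x 500 frames of dummy data.
feats = np.random.randn(1, 64, 500).astype(np.float16)

inp = triton_http.InferInput("input__0", list(feats.shape), "FP16")
inp.set_data_from_numpy(feats)
out = triton_http.InferRequestedOutput("output__0")

result = client.infer("jasper-onnx", inputs=[inp], outputs=[out])
logits = result.as_numpy("output__0")  # [1, time', 29]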
TensorFlow/Detection/SSD/models/research/object_detection/dataset_tools
dataset_tools
create_coco_tf_record_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Test for create_coco_tf_record.py.""" import io import json import os import numpy as np import PIL.Image import tensorflow as tf from object_detection.dataset_tools import create_coco_tf_record class CreateCocoTFRecordTest(tf.test.TestCase): def _assertProtoEqual(self, proto_field, expectation): """Helper function to assert if a proto field equals some value. Args: proto_field: The protobuf field to compare. expectation: The expected value of the protobuf field. """ proto_list = [p for p in proto_field] self.assertListEqual(proto_list, expectation) def test_create_tf_example(self): image_file_name = 'tmp_image.jpg' image_data = np.random.rand(256, 256, 3) tmp_dir = self.get_temp_dir() save_path = os.path.join(tmp_dir, image_file_name) image = PIL.Image.fromarray(image_data, 'RGB') image.save(save_path) image = { 'file_name': image_file_name, 'height': 256, 'width': 256, 'id': 11, } annotations_list = [{ 'area': .5, 'iscrowd': False, 'image_id': 11, 'bbox': [64, 64, 128, 128], 'category_id': 2, 'id': 1000, }] image_dir = tmp_dir category_index = { 1: { 'name': 'dog', 'id': 1 }, 2: { 'name': 'cat', 'id': 2 }, 3: { 'name': 'human', 'id': 3 } } (_, example, num_annotations_skipped) = create_coco_tf_record.create_tf_example( image, annotations_list, image_dir, category_index) self.assertEqual(num_annotations_skipped, 0) self._assertProtoEqual( example.features.feature['image/height'].int64_list.value, [256]) self._assertProtoEqual( example.features.feature['image/width'].int64_list.value, [256]) self._assertProtoEqual( example.features.feature['image/filename'].bytes_list.value, [image_file_name]) self._assertProtoEqual( example.features.feature['image/source_id'].bytes_list.value, [str(image['id'])]) self._assertProtoEqual( example.features.feature['image/format'].bytes_list.value, ['jpeg']) self._assertProtoEqual( example.features.feature['image/object/bbox/xmin'].float_list.value, [0.25]) self._assertProtoEqual( example.features.feature['image/object/bbox/ymin'].float_list.value, [0.25]) self._assertProtoEqual( example.features.feature['image/object/bbox/xmax'].float_list.value, [0.75]) self._assertProtoEqual( example.features.feature['image/object/bbox/ymax'].float_list.value, [0.75]) self._assertProtoEqual( example.features.feature['image/object/class/text'].bytes_list.value, ['cat']) def test_create_tf_example_with_instance_masks(self): image_file_name = 'tmp_image.jpg' image_data = np.random.rand(8, 8, 3) tmp_dir = self.get_temp_dir() save_path = os.path.join(tmp_dir, image_file_name) image = PIL.Image.fromarray(image_data, 'RGB') image.save(save_path) image = { 'file_name': image_file_name, 'height': 8, 'width': 8, 'id': 11, } annotations_list = [{ 'area': .5, 'iscrowd': False, 'image_id': 11, 'bbox': [0, 0, 8, 8], 'segmentation': [[4, 0, 0, 0, 0, 4], [8, 4, 4, 8, 8, 8]], 'category_id': 1, 'id': 1000, }] 
image_dir = tmp_dir category_index = { 1: { 'name': 'dog', 'id': 1 }, } (_, example, num_annotations_skipped) = create_coco_tf_record.create_tf_example( image, annotations_list, image_dir, category_index, include_masks=True) self.assertEqual(num_annotations_skipped, 0) self._assertProtoEqual( example.features.feature['image/height'].int64_list.value, [8]) self._assertProtoEqual( example.features.feature['image/width'].int64_list.value, [8]) self._assertProtoEqual( example.features.feature['image/filename'].bytes_list.value, [image_file_name]) self._assertProtoEqual( example.features.feature['image/source_id'].bytes_list.value, [str(image['id'])]) self._assertProtoEqual( example.features.feature['image/format'].bytes_list.value, ['jpeg']) self._assertProtoEqual( example.features.feature['image/object/bbox/xmin'].float_list.value, [0]) self._assertProtoEqual( example.features.feature['image/object/bbox/ymin'].float_list.value, [0]) self._assertProtoEqual( example.features.feature['image/object/bbox/xmax'].float_list.value, [1]) self._assertProtoEqual( example.features.feature['image/object/bbox/ymax'].float_list.value, [1]) self._assertProtoEqual( example.features.feature['image/object/class/text'].bytes_list.value, ['dog']) encoded_mask_pngs = [ io.BytesIO(encoded_masks) for encoded_masks in example.features.feature[ 'image/object/mask'].bytes_list.value ] pil_masks = [ np.array(PIL.Image.open(encoded_mask_png)) for encoded_mask_png in encoded_mask_pngs ] self.assertTrue(len(pil_masks) == 1) self.assertAllEqual(pil_masks[0], [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1]]) def test_create_sharded_tf_record(self): tmp_dir = self.get_temp_dir() image_paths = ['tmp1_image.jpg', 'tmp2_image.jpg'] for image_path in image_paths: image_data = np.random.rand(256, 256, 3) save_path = os.path.join(tmp_dir, image_path) image = PIL.Image.fromarray(image_data, 'RGB') image.save(save_path) images = [{ 'file_name': image_paths[0], 'height': 256, 'width': 256, 'id': 11, }, { 'file_name': image_paths[1], 'height': 256, 'width': 256, 'id': 12, }] annotations = [{ 'area': .5, 'iscrowd': False, 'image_id': 11, 'bbox': [64, 64, 128, 128], 'category_id': 2, 'id': 1000, }] category_index = [{ 'name': 'dog', 'id': 1 }, { 'name': 'cat', 'id': 2 }, { 'name': 'human', 'id': 3 }] groundtruth_data = {'images': images, 'annotations': annotations, 'categories': category_index} annotation_file = os.path.join(tmp_dir, 'annotation.json') with open(annotation_file, 'w') as annotation_fid: json.dump(groundtruth_data, annotation_fid) output_path = os.path.join(tmp_dir, 'out.record') create_coco_tf_record._create_tf_record_from_coco_annotations( annotation_file, tmp_dir, output_path, False, 2) self.assertTrue(os.path.exists(output_path + '-00000-of-00002')) self.assertTrue(os.path.exists(output_path + '-00001-of-00002')) if __name__ == '__main__': tf.test.main()
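The expected values in `test_create_tf_example` follow directly from COCO's `[x, y, width, height]` box convention: the corners are divided by the image size, so `[64, 64, 128, 128]` on a 256x256 image becomes `(0.25, 0.25, 0.75, 0.75)`. A small standalone check of that arithmetic (the helper name is illustrative, not part of the module under test):

# Standalone check of the normalization the test above asserts.
def normalize_coco_bbox(bbox, image_width, image_height):
    x, y, w, h = bbox
    return (x / image_width, y / image_height,
            (x + w) / image_width, (y + h) / image_height)

xmin, ymin, xmax, ymax = normalize_coco_bbox([64, 64, 128, 128], 256, 256)
assert (xmin, ymin, xmax, ymax) == (0.25, 0.25, 0.75, 0.75)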
PyTorch/SpeechRecognition/wav2vec2/common/fairseq/modules
modules
same_pad
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from torch import nn


class SamePad(nn.Module):
    def __init__(self, kernel_size, causal=False):
        super().__init__()
        if causal:
            self.remove = kernel_size - 1
        else:
            self.remove = 1 if kernel_size % 2 == 0 else 0

    def forward(self, x):
        if self.remove > 0:
            x = x[:, :, : -self.remove]
        return x
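SamePad exists because a Conv1d with an even kernel size and padding=kernel_size//2 emits one extra frame (and a causal conv padded by kernel_size-1 emits kernel_size-1 extra frames); the module trims them so the output length matches the input. A minimal usage sketch, with the import path inferred from the repo layout shown above:

# Minimal usage sketch of SamePad with an even-kernel convolution.
import torch
from torch import nn
from common.fairseq.modules.same_pad import SamePad  # path inferred from the layout above

x = torch.randn(2, 8, 100)                        # (batch, channels, time)
conv = nn.Conv1d(8, 8, kernel_size=4, padding=2)  # even kernel -> 101 output frames
y = SamePad(kernel_size=4)(conv(x))               # trims the one extra frame
assert y.shape[-1] == x.shape[-1]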
Tools/PyTorch/TimeSeriesPredictionPlatform/conf/model
model
lstm
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

_target_: models.lstm.LSTM
config:
  hidden_size: 128
  dropout: 0.1
  missing_data_strategy: 'mask'
defaults:
  - _self_
  - /trainer@_global_/trainer: ctltrainer
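The `_target_` key lets Hydra build the model object directly from this node. A minimal sketch, assuming the file is saved as `conf/model/lstm.yaml` and that `models.lstm.LSTM` accepts the nested `config` mapping as a keyword argument (both are assumptions about the surrounding project, not shown here):

# Minimal sketch: instantiate the model from this config node with Hydra utilities.
from hydra.utils import instantiate
from omegaconf import OmegaConf

cfg = OmegaConf.load("conf/model/lstm.yaml")   # path is an assumption
cfg.pop("defaults", None)   # the defaults list is consumed by Hydra composition, not the model
model = instantiate(cfg)    # calls models.lstm.LSTM(config={...})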
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/plugins/taco2AttentionPlugin
taco2AttentionPlugin
taco2AttentionLayerPlugin
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "taco2AttentionLayerPlugin.h" #include "taco2AttentionLayerKernel.h" #include "taco2Utils.h" #include <cassert> #include <cstdlib> #include <cstring> #include <cuda_runtime.h> // cudaError_t #include <iostream> #include <sstream> #include <stdexcept> #include <string> namespace nvinfer1 { namespace plugin { using value_type = Taco2AttentionLayerPlugin::value_type; /****************************************************************************** * CONSTANTS ****************************************************************** *****************************************************************************/ namespace { constexpr const char* const PLUGIN_NAME = "Taco2Attention"; constexpr const char* const PLUGIN_VERSION = "0.1.0"; } // namespace /****************************************************************************** * HELPER FUNCTIONS *********************************************************** *****************************************************************************/ namespace { std::vector<value_type> toVector(const Weights& weights) { if (weights.type != DataType::kFLOAT) { throw std::runtime_error( "Invalid data type for Attention weights: " + std::to_string(static_cast<int>(weights.type))); } const value_type* const valuesBegin = static_cast<const value_type*>(weights.values); const value_type* const valuesEnd = valuesBegin + weights.count; return std::vector<value_type>(valuesBegin, valuesEnd); } const void* offset(const void* ptr, const size_t offset) { return reinterpret_cast<const void*>(static_cast<const uint8_t*>(ptr) + offset); } } // namespace /****************************************************************************** * STATIC METHODS ************************************************************* *****************************************************************************/ const char* Taco2AttentionLayerPlugin::getName() { return PLUGIN_NAME; } const char* Taco2AttentionLayerPlugin::getVersion() { return PLUGIN_VERSION; } Taco2AttentionLayerPlugin 
Taco2AttentionLayerPlugin::deserialize(const void* const data, const size_t length) { static constexpr const size_t numDims = 5; if (length < numDims * sizeof(int32_t)) { throw std::runtime_error("Invalid serialized size: " + std::to_string(length)); } const int numEncodingDimension = static_cast<const int32_t*>(data)[0]; const int numQueryDimension = static_cast<const int32_t*>(data)[1]; const int numFilters = static_cast<const int32_t*>(data)[2]; const int convKernelSize = static_cast<const int32_t*>(data)[3]; const int numAttentionDimension = static_cast<const int32_t*>(data)[4]; const int numQueryWeights = numQueryDimension * numAttentionDimension; const int numConvWeights = numFilters * 2 * convKernelSize; const int numLocationWeights = numFilters * numAttentionDimension; const int numEnergyWeights = numAttentionDimension; const size_t reqSize = numDims * sizeof(int32_t) + sizeof(value_type) * (numQueryWeights + numConvWeights + numLocationWeights + numEnergyWeights); if (reqSize != length) { throw std::runtime_error( "Invalid serialized size: " + std::to_string(length) + " / " + std::to_string(reqSize)); } const Weights queryWeights{DataType::kFLOAT, offset(data, numDims * sizeof(int32_t)), numQueryWeights}; const Weights convWeights{ DataType::kFLOAT, offset(queryWeights.values, sizeof(value_type) * numQueryWeights), numConvWeights}; const Weights locationWeights{ DataType::kFLOAT, offset(convWeights.values, sizeof(value_type) * numConvWeights), numLocationWeights}; const Weights energyWeights{ DataType::kFLOAT, offset(locationWeights.values, sizeof(value_type) * numLocationWeights), numEnergyWeights}; return Taco2AttentionLayerPlugin(numEncodingDimension, numQueryDimension, numFilters, convKernelSize, numAttentionDimension, queryWeights, convWeights, locationWeights, energyWeights); } /****************************************************************************** * CONSTRUCTORS / DESTRUCTOR ************************************************** *****************************************************************************/ Taco2AttentionLayerPlugin::Taco2AttentionLayerPlugin(int encDimension, int queryDimension, int numFilters, int convKernelSize, int attDimension, const nvinfer1::Weights& queryWeights, const nvinfer1::Weights& convWeights, const nvinfer1::Weights& locationWeights, const nvinfer1::Weights& energyWeights) : mNumEncodingDimension(encDimension) , mNumQueryDimension(queryDimension) , mNumFilters(numFilters) , mConvKernelSize(convKernelSize) , mNumAttentionDimension(attDimension) , mQueryWeightsHost(toVector(queryWeights)) , mConvWeightsHost(toVector(convWeights)) , mLocationWeightsHost(toVector(locationWeights)) , mEnergyWeightsHost(toVector(energyWeights)) , mKernel(nullptr) , mNamespace() { const size_t expectedQueryWeights = mNumQueryDimension * mNumAttentionDimension; const size_t expectedConvWeights = mNumFilters * mConvKernelSize * 2; const size_t expectedLocationWeights = mNumFilters * mNumAttentionDimension; const size_t expectedEnergyWeights = mNumAttentionDimension; if (mQueryWeightsHost.size() != expectedQueryWeights) { throw std::runtime_error("Attention expected " + std::to_string(expectedQueryWeights) + " query weights but given " + std::to_string(mQueryWeightsHost.size())); } if (mConvWeightsHost.size() != expectedConvWeights) { throw std::runtime_error("Attention expected " + std::to_string(expectedConvWeights) + " conv weights but given " + std::to_string(mConvWeightsHost.size())); } if (mLocationWeightsHost.size() != expectedLocationWeights) { throw 
std::runtime_error("Attention expected " + std::to_string(expectedLocationWeights) + " location weights but given " + std::to_string(mLocationWeightsHost.size())); } if (mEnergyWeightsHost.size() != expectedEnergyWeights) { throw std::runtime_error("Attention expected " + std::to_string(expectedEnergyWeights) + " energy weights but given " + std::to_string(mEnergyWeightsHost.size())); } } Taco2AttentionLayerPlugin::Taco2AttentionLayerPlugin(Taco2AttentionLayerPlugin&& other) : mNumEncodingDimension(other.mNumEncodingDimension) , mNumQueryDimension(other.mNumQueryDimension) , mNumFilters(other.mNumFilters) , mConvKernelSize(other.mConvKernelSize) , mNumAttentionDimension(other.mNumAttentionDimension) , mQueryWeightsHost(std::move(other.mQueryWeightsHost)) , mConvWeightsHost(std::move(other.mConvWeightsHost)) , mLocationWeightsHost(std::move(other.mLocationWeightsHost)) , mEnergyWeightsHost(std::move(other.mEnergyWeightsHost)) , mKernel(std::move(other.mKernel)) , mNamespace(std::move(other.mNamespace)) { other.mNumEncodingDimension = 0; other.mNumQueryDimension = 0; other.mNumFilters = 0; other.mConvKernelSize = 0; other.mNumAttentionDimension = 0; } Taco2AttentionLayerPlugin::~Taco2AttentionLayerPlugin() { destroy(); } /****************************************************************************** * PUBLIC METHODS ************************************************************* *****************************************************************************/ Taco2AttentionLayerPlugin& Taco2AttentionLayerPlugin::operator=(Taco2AttentionLayerPlugin&& other) { // defere to constructor *this = Taco2AttentionLayerPlugin(std::move(other)); return *this; } DataType Taco2AttentionLayerPlugin::getOutputDataType( const int /* index */, const DataType* const /* inputTypes */, const int /* nbInputs */) const { return DataType::kFLOAT; } const char* Taco2AttentionLayerPlugin::getPluginType() const { return getName(); } const char* Taco2AttentionLayerPlugin::getPluginVersion() const { return getVersion(); } int Taco2AttentionLayerPlugin::getNbOutputs() const { return 2; } DimsExprs Taco2AttentionLayerPlugin::getOutputDimensions( const int outputIndex, const DimsExprs* inputs, const int nbInputs, IExprBuilder& exprBuilder) { if (outputIndex >= getNbOutputs()) { throw std::runtime_error( "Invalid output index: " + std::to_string(outputIndex) + " / " + std::to_string(getNbOutputs()) + "."); } if (nbInputs != NUM_INPUTS) { throw std::runtime_error( "Can only handle " + std::to_string(NUM_INPUTS) + " input tensors: " + std::to_string(nbInputs)); } if (outputIndex == CONTEXT_OUTPUT) { return DimsExprs{ 3, {inputs[MEMORY_INDEX].d[0], exprBuilder.constant(1), exprBuilder.constant(mNumEncodingDimension)}}; } else if (outputIndex == WEIGHT_OUTPUT) { return DimsExprs{3, {inputs[MEMORY_INDEX].d[0], exprBuilder.constant(2), inputs[MEMORY_INDEX].d[1]}}; } else { throw std::runtime_error("Unknown output index: " + std::to_string(outputIndex)); } } bool Taco2AttentionLayerPlugin::supportsFormatCombination( const int pos, const PluginTensorDesc* const inOut, const int /* nbInputs */, const int /* nbOutputs */) { return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT; } void Taco2AttentionLayerPlugin::configurePlugin(const DynamicPluginTensorDesc* const in, const int nbInputs, const DynamicPluginTensorDesc* const out, const int nbOutputs) { if (nbInputs != NUM_INPUTS) { throw std::runtime_error( "Can only handle " + std::to_string(NUM_INPUTS) + " input tensors: " + std::to_string(nbInputs)); } 
for (int i = 0; i < nbInputs; ++i) { if (in[i].desc.type != DataType::kFLOAT) { throw std::runtime_error("Only FLOAT supported as input " + std::to_string(i) + " : " + std::to_string(static_cast<int>(in[i].desc.type))); } } // assert dimensions if (in[MEMORY_INDEX].desc.dims.d[2] != mNumEncodingDimension) { throw std::runtime_error("Memory input must be L x " + std::to_string(mNumEncodingDimension) + " but got " + taco2::Taco2Utils::dimsToString(in[MEMORY_INDEX].desc.dims)); } if (in[PROCESSED_MEMORY_INDEX].desc.dims.d[2] != mNumAttentionDimension) { throw std::runtime_error("Processed Memory input must be L x " + std::to_string(mNumAttentionDimension) + " but got " + taco2::Taco2Utils::dimsToString(in[PROCESSED_MEMORY_INDEX].desc.dims)); } if (in[WEIGHT_INDEX].desc.dims.d[1] != 2) { throw std::runtime_error( "Weights input must be 2 x L but got " + taco2::Taco2Utils::dimsToString(in[WEIGHT_INDEX].desc.dims)); } if (taco2::Taco2Utils::getDimensionsSize(in[ATTENTION_HIDDEN_INDEX].desc.dims) != static_cast<size_t>(mNumQueryDimension)) { throw std::runtime_error("Attention hidden input must be " + std::to_string(mNumQueryDimension) + " but got " + taco2::Taco2Utils::dimsToString(in[ATTENTION_HIDDEN_INDEX].desc.dims) + " (" + std::to_string(taco2::Taco2Utils::getDimensionsSize(in[ATTENTION_HIDDEN_INDEX].desc.dims)) + ")."); } if (nbOutputs != NUM_OUTPUTS) { throw std::runtime_error("Only two outputs is implemented: " + std::to_string(nbOutputs)); } for (int i = 0; i < nbOutputs; ++i) { if (out[i].desc.type != DataType::kFLOAT) { throw std::runtime_error("Only FLOAT supported as output: " + std::to_string(i) + " : " + std::to_string(static_cast<int>(out[i].desc.type))); } } } int Taco2AttentionLayerPlugin::initialize() { try { mKernel.reset( new Taco2AttentionLayerKernel(mQueryWeightsHost, mConvWeightsHost, mLocationWeightsHost, mEnergyWeightsHost, mNumEncodingDimension, mNumQueryDimension, mNumFilters, mConvKernelSize, mNumAttentionDimension)); } catch (const std::exception& e) { std::cerr << "Taco2AttentionLayerPlugin initialization failed: " << e.what() << std::endl; return 1; } return 0; } void Taco2AttentionLayerPlugin::terminate() { mKernel.reset(); } size_t Taco2AttentionLayerPlugin::getWorkspaceSize( const PluginTensorDesc* const in, const int nbInputs, const PluginTensorDesc* const /* out */, const int /* nbOutputs */) const { if (nbInputs != NUM_INPUTS) { throw std::runtime_error("Invalid number of inputs: " + std::to_string(nbInputs) + ", but expected " + std::to_string(NUM_INPUTS)); } const int inputLength = in[MEMORY_INDEX].dims.d[1]; const int batchSize = in[MEMORY_INDEX].dims.d[0]; // space for queryOutput (num attention dimensions), // convOutput (input length*num filters), elemSum (input length), and // energyScratch (inputLength). 
const size_t numWorkspaceElements = mNumAttentionDimension + // query output (inputLength * mNumFilters) + // conv output (mNumAttentionDimension * inputLength) + // elem sum inputLength; // enery scratch return numWorkspaceElements * sizeof(value_type) * batchSize; } int Taco2AttentionLayerPlugin::enqueue(const PluginTensorDesc* const inputDesc, const PluginTensorDesc* /* outputDesc */, const void* const* const inputs, void* const* const outputs, void* const workspace, cudaStream_t stream) { const int inputLength = inputDesc[MEMORY_INDEX].dims.d[1]; const int batchSize = inputDesc[MEMORY_INDEX].dims.d[0]; if (batchSize != 1) { // we only support batch size of 1 right now std::cerr << "Taco2AttentionLayerPlugin plugin does not support batch size other than " "1: got " << batchSize << std::endl; std::cerr << "Recompile without plugins to use a larger batch size." << std::endl; return 1; } // name inputs and outputs const value_type* const memoryDevice = static_cast<const value_type*>(inputs[MEMORY_INDEX]); const value_type* const processedMemoryDevice = static_cast<const value_type*>(inputs[PROCESSED_MEMORY_INDEX]); const value_type* const weightsDevice = static_cast<const value_type*>(inputs[WEIGHT_INDEX]); const value_type* const attentionHiddenDevice = static_cast<const value_type*>(inputs[ATTENTION_HIDDEN_INDEX]); value_type* const outputContextDevice = static_cast<value_type*>(outputs[CONTEXT_OUTPUT]); value_type* const outputWeightsDevice = static_cast<value_type*>(outputs[WEIGHT_OUTPUT]); try { mKernel->execute(memoryDevice, processedMemoryDevice, weightsDevice, attentionHiddenDevice, outputContextDevice, outputWeightsDevice, inputLength, static_cast<value_type*>(workspace), stream); } catch (const std::exception& e) { std::cerr << "Taco2AttentionLayerPlugin failed: " << e.what() << std::endl; return 1; } return 0; } size_t Taco2AttentionLayerPlugin::getSerializationSize() const { const int numQueryWeights = mNumQueryDimension * mNumAttentionDimension; const int numConvWeights = mNumFilters * 2 * mConvKernelSize; const int numLocationWeights = mNumFilters * mNumAttentionDimension; const int numEnergyWeights = mNumAttentionDimension; return 5 * sizeof(int32_t) + sizeof(value_type) * (numQueryWeights + numConvWeights + numLocationWeights + numEnergyWeights); } void Taco2AttentionLayerPlugin::serialize(void* const buffer) const { static_cast<int32_t*>(buffer)[0] = mNumEncodingDimension; static_cast<int32_t*>(buffer)[1] = mNumQueryDimension; static_cast<int32_t*>(buffer)[2] = mNumFilters; static_cast<int32_t*>(buffer)[3] = mConvKernelSize; static_cast<int32_t*>(buffer)[4] = mNumAttentionDimension; float* const queryWeights = reinterpret_cast<float*>(static_cast<int32_t*>(buffer) + 5); float* const convWeights = queryWeights + mQueryWeightsHost.size(); float* const locationWeights = convWeights + mConvWeightsHost.size(); float* const energyWeights = locationWeights + mLocationWeightsHost.size(); memcpy(queryWeights, mQueryWeightsHost.data(), sizeof(value_type) * mQueryWeightsHost.size()); memcpy(convWeights, mConvWeightsHost.data(), sizeof(value_type) * mConvWeightsHost.size()); memcpy(locationWeights, mLocationWeightsHost.data(), sizeof(value_type) * mLocationWeightsHost.size()); memcpy(energyWeights, mEnergyWeightsHost.data(), sizeof(value_type) * mEnergyWeightsHost.size()); } void Taco2AttentionLayerPlugin::destroy() { terminate(); } IPluginV2DynamicExt* Taco2AttentionLayerPlugin::clone() const { // call constructor which copy's data Taco2AttentionLayerPlugin 
clone(mNumEncodingDimension, mNumQueryDimension, mNumFilters, mConvKernelSize, mNumAttentionDimension, Weights{DataType::kFLOAT, mQueryWeightsHost.data(), static_cast<int64_t>(mQueryWeightsHost.size())}, Weights{DataType::kFLOAT, mConvWeightsHost.data(), static_cast<int64_t>(mConvWeightsHost.size())}, Weights{DataType::kFLOAT, mLocationWeightsHost.data(), static_cast<int64_t>(mLocationWeightsHost.size())}, Weights{DataType::kFLOAT, mEnergyWeightsHost.data(), static_cast<int64_t>(mEnergyWeightsHost.size())}); if (mKernel) { // initialize the clone too clone.initialize(); } // move it to the heap last to avoid exceptions causing memory leaks return new Taco2AttentionLayerPlugin(std::move(clone)); } void Taco2AttentionLayerPlugin::setPluginNamespace(const char* pluginNamespace) { mNamespace = pluginNamespace; } const char* Taco2AttentionLayerPlugin::getPluginNamespace() const { return mNamespace.c_str(); } } // namespace plugin } // namespace nvinfer1
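As the `serialize()`/`deserialize()` pair above shows, the plugin's blob is five `int32` dimensions followed by the `float32` query, conv, location, and energy weights, in that order. A small sketch of the resulting size; the dimension values are assumed Tacotron 2-like numbers for illustration, not read from a real engine:

# Sketch of the blob size implied by serialize()/getSerializationSize() above.
def attention_blob_bytes(enc_dim, query_dim, num_filters, kernel_size, att_dim):
    num_query = query_dim * att_dim
    num_conv = num_filters * 2 * kernel_size
    num_location = num_filters * att_dim
    num_energy = att_dim
    # 5 int32 header fields + float32 weights in query/conv/location/energy order
    return 5 * 4 + 4 * (num_query + num_conv + num_location + num_energy)

print(attention_blob_bytes(enc_dim=512, query_dim=1024, num_filters=32,
                           kernel_size=31, att_dim=128))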
PyTorch/Recommendation/DLRM/dlrm/data
data
samplers
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import torch
from torch.utils.data import RandomSampler

from dlrm.utils.distributed import get_local_rank


class RandomDistributedSampler(RandomSampler):
    _SAMPLE_FILE = "/tmp/dlrm_training_sample.npy"

    def __iter__(self):
        """
        To guarantee that all ranks use the same permutation, rank 0 generates it
        and syncs it to the other ranks by writing it to disk.
        """
        if get_local_rank() == 0:
            np.save(self._SAMPLE_FILE, np.array(list(super().__iter__())))
        torch.distributed.barrier()
        sample = np.load(self._SAMPLE_FILE)
        return iter(sample)
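A minimal usage sketch, assuming the process group is already initialized on every rank (for example via `torchrun` or `torch.distributed.launch`) so that `get_local_rank()` and the barrier work; the toy dataset is illustrative:

# Minimal usage sketch of RandomDistributedSampler (distributed init assumed).
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.arange(1000))
sampler = RandomDistributedSampler(dataset)   # rank 0 writes the permutation, others read it
loader = DataLoader(dataset, batch_size=32, sampler=sampler)
for (batch,) in loader:
    pass  # all local ranks on the node iterate the same permutation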
TensorFlow/Detection/SSD/models/research/object_detection/predictors/heads
heads
class_head
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Class Head. Contains Class prediction head classes for different meta architectures. All the class prediction heads have a predict function that receives the `features` as the first argument and returns class predictions with background. """ import functools import tensorflow as tf from object_detection.predictors.heads import head slim = tf.contrib.slim class MaskRCNNClassHead(head.Head): """Mask RCNN class prediction head. Please refer to Mask RCNN paper: https://arxiv.org/abs/1703.06870 """ def __init__(self, is_training, num_class_slots, fc_hyperparams_fn, use_dropout, dropout_keep_prob): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. fc_hyperparams_fn: A function to generate tf-slim arg_scope with hyperparameters for fully connected ops. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. """ super(MaskRCNNClassHead, self).__init__() self._is_training = is_training self._num_class_slots = num_class_slots self._fc_hyperparams_fn = fc_hyperparams_fn self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob def predict(self, features, num_predictions_per_location=1): """Predicts boxes and class scores. Args: features: A float tensor of shape [batch_size, height, width, channels] containing features for a batch of images. num_predictions_per_location: Int containing number of predictions per location. Returns: class_predictions_with_background: A float tensor of shape [batch_size, 1, num_class_slots] representing the class predictions for the proposals. Raises: ValueError: If num_predictions_per_location is not 1. 
""" if num_predictions_per_location != 1: raise ValueError('Only num_predictions_per_location=1 is supported') spatial_averaged_roi_pooled_features = tf.reduce_mean( features, [1, 2], keep_dims=True, name='AvgPool') flattened_roi_pooled_features = slim.flatten( spatial_averaged_roi_pooled_features) if self._use_dropout: flattened_roi_pooled_features = slim.dropout( flattened_roi_pooled_features, keep_prob=self._dropout_keep_prob, is_training=self._is_training) with slim.arg_scope(self._fc_hyperparams_fn()): class_predictions_with_background = slim.fully_connected( flattened_roi_pooled_features, self._num_class_slots, activation_fn=None, scope='ClassPredictor') class_predictions_with_background = tf.reshape( class_predictions_with_background, [-1, 1, self._num_class_slots]) return class_predictions_with_background class ConvolutionalClassHead(head.Head): """Convolutional class prediction head.""" def __init__(self, is_training, num_class_slots, use_dropout, dropout_keep_prob, kernel_size, apply_sigmoid_to_scores=False, class_prediction_bias_init=0.0, use_depthwise=False): """Constructor. Args: is_training: Indicates whether the BoxPredictor is in training mode. num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. use_dropout: Option to use dropout or not. Note that a single dropout op is applied here prior to both box and class predictions, which stands in contrast to the ConvolutionalBoxPredictor below. dropout_keep_prob: Keep probability for dropout. This is only used if use_dropout is True. kernel_size: Size of final convolution kernel. If the spatial resolution of the feature map is smaller than the kernel size, then the kernel size is automatically set to be min(feature_width, feature_height). apply_sigmoid_to_scores: if True, apply the sigmoid on the output class_predictions. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. Raises: ValueError: if min_depth > max_depth. """ super(ConvolutionalClassHead, self).__init__() self._is_training = is_training self._num_class_slots = num_class_slots self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._kernel_size = kernel_size self._apply_sigmoid_to_scores = apply_sigmoid_to_scores self._class_prediction_bias_init = class_prediction_bias_init self._use_depthwise = use_depthwise def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: class_predictions_with_background: A float tensors of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. 
""" net = features if self._use_dropout: net = slim.dropout(net, keep_prob=self._dropout_keep_prob) if self._use_depthwise: class_predictions_with_background = slim.separable_conv2d( net, None, [self._kernel_size, self._kernel_size], padding='SAME', depth_multiplier=1, stride=1, rate=1, scope='ClassPredictor_depthwise') class_predictions_with_background = slim.conv2d( class_predictions_with_background, num_predictions_per_location * self._num_class_slots, [1, 1], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='ClassPredictor') else: class_predictions_with_background = slim.conv2d( net, num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], activation_fn=None, normalizer_fn=None, normalizer_params=None, scope='ClassPredictor', biases_initializer=tf.constant_initializer( self._class_prediction_bias_init)) if self._apply_sigmoid_to_scores: class_predictions_with_background = tf.sigmoid( class_predictions_with_background) batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) return class_predictions_with_background # TODO(alirezafathi): See if possible to unify Weight Shared with regular # convolutional class head. class WeightSharedConvolutionalClassHead(head.Head): """Weight shared convolutional class prediction head. This head allows sharing the same set of parameters (weights) when called more then once on different feature maps. """ def __init__(self, num_class_slots, kernel_size=3, class_prediction_bias_init=0.0, use_dropout=False, dropout_keep_prob=0.8, use_depthwise=False, score_converter_fn=tf.identity): """Constructor. Args: num_class_slots: number of class slots. Note that num_class_slots may or may not include an implicit background category. kernel_size: Size of final convolution kernel. class_prediction_bias_init: constant value to initialize bias of the last conv2d layer before class prediction. use_dropout: Whether to apply dropout to class prediction head. dropout_keep_prob: Probability of keeping activiations. use_depthwise: Whether to use depthwise convolutions for prediction steps. Default is False. score_converter_fn: Callable elementwise nonlinearity (that takes tensors as inputs and returns tensors). """ super(WeightSharedConvolutionalClassHead, self).__init__() self._num_class_slots = num_class_slots self._kernel_size = kernel_size self._class_prediction_bias_init = class_prediction_bias_init self._use_dropout = use_dropout self._dropout_keep_prob = dropout_keep_prob self._use_depthwise = use_depthwise self._score_converter_fn = score_converter_fn def predict(self, features, num_predictions_per_location): """Predicts boxes. Args: features: A float tensor of shape [batch_size, height, width, channels] containing image features. num_predictions_per_location: Number of box predictions to be made per spatial location. Returns: class_predictions_with_background: A tensor of shape [batch_size, num_anchors, num_class_slots] representing the class predictions for the proposals. 
""" class_predictions_net = features if self._use_dropout: class_predictions_net = slim.dropout( class_predictions_net, keep_prob=self._dropout_keep_prob) if self._use_depthwise: conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1) else: conv_op = slim.conv2d class_predictions_with_background = conv_op( class_predictions_net, num_predictions_per_location * self._num_class_slots, [self._kernel_size, self._kernel_size], activation_fn=None, stride=1, padding='SAME', normalizer_fn=None, biases_initializer=tf.constant_initializer( self._class_prediction_bias_init), scope='ClassPredictor') batch_size = features.get_shape().as_list()[0] if batch_size is None: batch_size = tf.shape(features)[0] class_predictions_with_background = self._score_converter_fn( class_predictions_with_background) class_predictions_with_background = tf.reshape( class_predictions_with_background, [batch_size, -1, self._num_class_slots]) return class_predictions_with_background
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/modeling/roi_heads/box_head
box_head
box_head
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import torch from torch import nn from .roi_box_feature_extractors import make_roi_box_feature_extractor from .roi_box_predictors import make_roi_box_predictor from .inference import make_roi_box_post_processor from .loss import make_roi_box_loss_evaluator class ROIBoxHead(torch.nn.Module): """ Generic Box Head class. """ def __init__(self, cfg): super(ROIBoxHead, self).__init__() self.feature_extractor = make_roi_box_feature_extractor(cfg) self.predictor = make_roi_box_predictor(cfg) self.post_processor = make_roi_box_post_processor(cfg) self.loss_evaluator = make_roi_box_loss_evaluator(cfg) def forward(self, features, proposals, targets=None): """ Arguments: features (list[Tensor]): feature-maps from possibly several levels proposals (list[BoxList]): proposal boxes targets (list[BoxList], optional): the ground-truth targets. Returns: x (Tensor): the result of the feature extractor proposals (list[BoxList]): during training, the subsampled proposals are returned. During testing, the predicted boxlists are returned losses (dict[Tensor]): During training, returns the losses for the head. During testing, returns an empty dict. """ if self.training: # Faster R-CNN subsamples during training the proposals with a fixed # positive / negative ratio with torch.no_grad(): proposals = self.loss_evaluator.subsample(proposals, targets) # extract features that will be fed to the final classifier. The # feature_extractor generally corresponds to the pooler + heads x = self.feature_extractor(features, proposals) # final classifier that converts the features into predictions class_logits, box_regression = self.predictor(x) if not self.training: result = self.post_processor((class_logits, box_regression), proposals) return x, result, {} loss_classifier, loss_box_reg = self.loss_evaluator( [class_logits], [box_regression] ) return ( x, proposals, dict(loss_classifier=loss_classifier, loss_box_reg=loss_box_reg), ) def build_roi_box_head(cfg): """ Constructs a new box head. By default, uses ROIBoxHead, but if it turns out not to be enough, just register a new class and make it a parameter in the config """ return ROIBoxHead(cfg)
TensorFlow2/LanguageModeling/BERT/official/nlp/modeling/networks
networks
transformer_encoder
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Transformer-based text encoder network.""" from __future__ import absolute_import from __future__ import division # from __future__ import google_type_annotations from __future__ import print_function import tensorflow as tf from official.modeling import activations from official.nlp.modeling import layers @tf.keras.utils.register_keras_serializable(package='Text') class TransformerEncoder(tf.keras.Model): """Bi-directional Transformer-based encoder network. This network implements a bi-directional Transformer-based encoder as described in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding" (https://arxiv.org/abs/1810.04805). It includes the embedding lookups and transformer layers, but not the masked language model or classification task networks. The default values for this object are taken from the BERT-Base implementation in "BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding". Attributes: vocab_size: The size of the token vocabulary. hidden_size: The size of the transformer hidden layers. num_layers: The number of transformer layers. num_attention_heads: The number of attention heads for each transformer. The hidden size must be divisible by the number of attention heads. sequence_length: The sequence length that this encoder expects. If None, the sequence length is dynamic; if an integer, the encoder will require sequences padded to this length. max_sequence_length: The maximum sequence length that this encoder can consume. If None, max_sequence_length uses the value from sequence length. This determines the variable shape for positional embeddings. type_vocab_size: The number of types that the 'type_ids' input can take. intermediate_size: The intermediate size for the transformer layers. activation: The activation to use for the transformer layers. dropout_rate: The dropout rate to use for the transformer layers. attention_dropout_rate: The dropout rate to use for the attention layers within the transformer layers. initializer: The initialzer to use for all weights in this encoder. float_dtype: The dtype of this encoder. Can be 'float32' or 'float16'. 
""" def __init__(self, vocab_size, hidden_size=768, num_layers=12, num_attention_heads=12, sequence_length=512, max_sequence_length=None, type_vocab_size=16, intermediate_size=3072, activation=activations.gelu, dropout_rate=0.1, attention_dropout_rate=0.1, initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02), float_dtype='float32', **kwargs): activation = tf.keras.activations.get(activation) initializer = tf.keras.initializers.get(initializer) if not max_sequence_length: max_sequence_length = sequence_length self._self_setattr_tracking = False self._config_dict = { 'vocab_size': vocab_size, 'hidden_size': hidden_size, 'num_layers': num_layers, 'num_attention_heads': num_attention_heads, 'sequence_length': sequence_length, 'max_sequence_length': max_sequence_length, 'type_vocab_size': type_vocab_size, 'intermediate_size': intermediate_size, 'activation': tf.keras.activations.serialize(activation), 'dropout_rate': dropout_rate, 'attention_dropout_rate': attention_dropout_rate, 'initializer': tf.keras.initializers.serialize(initializer), 'float_dtype': float_dtype, } word_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name='input_word_ids') mask = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name='input_mask') type_ids = tf.keras.layers.Input( shape=(sequence_length,), dtype=tf.int32, name='input_type_ids') self._embedding_layer = layers.OnDeviceEmbedding( vocab_size=vocab_size, embedding_width=hidden_size, initializer=initializer, name='word_embeddings') word_embeddings = self._embedding_layer(word_ids) # Always uses dynamic slicing for simplicity. self._position_embedding_layer = layers.PositionEmbedding( initializer=initializer, use_dynamic_slicing=True, max_sequence_length=max_sequence_length) position_embeddings = self._position_embedding_layer(word_embeddings) type_embeddings = ( layers.OnDeviceEmbedding( vocab_size=type_vocab_size, embedding_width=hidden_size, initializer=initializer, use_one_hot=True, name='type_embeddings')(type_ids)) embeddings = tf.keras.layers.Add()( [word_embeddings, position_embeddings, type_embeddings]) embeddings = ( tf.keras.layers.LayerNormalization( name='embeddings/layer_norm', axis=-1, epsilon=1e-12, dtype=tf.float32)(embeddings)) embeddings = ( tf.keras.layers.Dropout(rate=dropout_rate, dtype=tf.float32)(embeddings)) if float_dtype == 'float16': embeddings = tf.cast(embeddings, tf.float16) data = embeddings attention_mask = layers.SelfAttentionMask()([data, mask]) for i in range(num_layers): layer = layers.Transformer( num_attention_heads=num_attention_heads, intermediate_size=intermediate_size, intermediate_activation=activation, dropout_rate=dropout_rate, attention_dropout_rate=attention_dropout_rate, kernel_initializer=initializer, dtype=float_dtype, name='transformer/layer_%d' % i) data = layer([data, attention_mask]) first_token_tensor = ( tf.keras.layers.Lambda(lambda x: tf.squeeze(x[:, 0:1, :], axis=1))(data) ) cls_output = tf.keras.layers.Dense( units=hidden_size, activation='tanh', kernel_initializer=initializer, name='pooler_transform')( first_token_tensor) super(TransformerEncoder, self).__init__( inputs=[word_ids, mask, type_ids], outputs=[data, cls_output], **kwargs) def get_embedding_table(self): return self._embedding_layer.embeddings def get_config(self): return self._config_dict @classmethod def from_config(cls, config, custom_objects=None): return cls(**config)
PyTorch/SpeechSynthesis/FastPitch/filelists
filelists
ljs_audio_pitch_text_test
wavs/LJ045-0096.wav|pitch/LJ045-0096.pt|Mrs. De Mohrenschildt thought that Oswald, wavs/LJ049-0022.wav|pitch/LJ049-0022.pt|The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent. wavs/LJ033-0042.wav|pitch/LJ033-0042.pt|Between the hours of eight and nine p.m. they were occupied with the children in the bedrooms located at the extreme east end of the house. wavs/LJ016-0117.wav|pitch/LJ016-0117.pt|The prisoner had nothing to deal with but wooden panels, and by dint of cutting and chopping he got both the lower panels out. wavs/LJ025-0157.wav|pitch/LJ025-0157.pt|Under these circumstances, unnatural as they are, with proper management, the bean will thrust forth its radicle and its plumule; wavs/LJ042-0219.wav|pitch/LJ042-0219.pt|Oswald demonstrated his thinking in connection with his return to the United States by preparing two sets of identical questions of the type which he might have thought wavs/LJ032-0164.wav|pitch/LJ032-0164.pt|it is not possible to state with scientific certainty that a particular small group of fibers come from a certain piece of clothing wavs/LJ046-0092.wav|pitch/LJ046-0092.pt|has confidence in the dedicated Secret Service men who are ready to lay down their lives for him wavs/LJ050-0118.wav|pitch/LJ050-0118.pt|Since these agencies are already obliged constantly to evaluate the activities of such groups, wavs/LJ043-0016.wav|pitch/LJ043-0016.pt|Jeanne De Mohrenschildt said, quote, wavs/LJ021-0078.wav|pitch/LJ021-0078.pt|no economic panacea, which could simply revive over-night the heavy industries and the trades dependent upon them. wavs/LJ039-0148.wav|pitch/LJ039-0148.pt|Examination of the cartridge cases found on the sixth floor of the Depository Building wavs/LJ047-0202.wav|pitch/LJ047-0202.pt|testified that the information available to the Federal Government about Oswald before the assassination would, if known to PRS, wavs/LJ023-0056.wav|pitch/LJ023-0056.pt|It is an easy document to understand when you remember that it was called into being wavs/LJ021-0025.wav|pitch/LJ021-0025.pt|And in many directions, the intervention of that organized control which we call government wavs/LJ030-0105.wav|pitch/LJ030-0105.pt|Communications in the motorcade. wavs/LJ021-0012.wav|pitch/LJ021-0012.pt|with respect to industry and business, but nearly all are agreed that private enterprise in times such as these wavs/LJ019-0169.wav|pitch/LJ019-0169.pt|and one or two men were allowed to mend clothes and make shoes. The rules made by the Secretary of State were hung up in conspicuous parts of the prison; wavs/LJ039-0088.wav|pitch/LJ039-0088.pt|It just is an aid in seeing in the fact that you only have the one element, the crosshair, wavs/LJ016-0192.wav|pitch/LJ016-0192.pt|"I think I could do that sort of job," said Calcraft, on the spur of the moment. wavs/LJ014-0142.wav|pitch/LJ014-0142.pt|was strewn in front of the dock, and sprinkled it towards the bench with a contemptuous gesture. wavs/LJ012-0015.wav|pitch/LJ012-0015.pt|Weedon and Lecasser to twelve and six months respectively in Coldbath Fields. wavs/LJ048-0033.wav|pitch/LJ048-0033.pt|Prior to November twenty-two, nineteen sixty-three wavs/LJ028-0349.wav|pitch/LJ028-0349.pt|who were each required to send so large a number to Babylon, that in all there were collected no fewer than fifty thousand. wavs/LJ030-0197.wav|pitch/LJ030-0197.pt|At first Mrs. 
Connally thought that her husband had been killed, wavs/LJ017-0133.wav|pitch/LJ017-0133.pt|Palmer speedily found imitators. wavs/LJ034-0123.wav|pitch/LJ034-0123.pt|Although Brennan testified that the man in the window was standing when he fired the shots, most probably he was either sitting or kneeling. wavs/LJ003-0282.wav|pitch/LJ003-0282.pt|Many years were to elapse before these objections should be fairly met and universally overcome. wavs/LJ032-0204.wav|pitch/LJ032-0204.pt|Special Agent Lyndal L. Shaneyfelt, a photography expert with the FBI, wavs/LJ016-0241.wav|pitch/LJ016-0241.pt|Calcraft served the city of London till eighteen seventy-four, when he was pensioned at the rate of twenty-five shillings per week. wavs/LJ023-0033.wav|pitch/LJ023-0033.pt|we will not allow ourselves to run around in new circles of futile discussion and debate, always postponing the day of decision. wavs/LJ009-0286.wav|pitch/LJ009-0286.pt|There has never been much science in the system of carrying out the extreme penalty in this country; the "finisher of the law" wavs/LJ008-0181.wav|pitch/LJ008-0181.pt|he had his pockets filled with bread and cheese, and it was generally supposed that he had come a long distance to see the fatal show. wavs/LJ015-0052.wav|pitch/LJ015-0052.pt|to the value of twenty thousand pounds. wavs/LJ016-0314.wav|pitch/LJ016-0314.pt|Sir George Grey thought there was a growing feeling in favor of executions within the prison precincts. wavs/LJ047-0056.wav|pitch/LJ047-0056.pt|From August nineteen sixty-two wavs/LJ010-0027.wav|pitch/LJ010-0027.pt|Nor did the methods by which they were perpetrated greatly vary from those in times past. wavs/LJ010-0065.wav|pitch/LJ010-0065.pt|At the former the "Provisional Government" was to be established, wavs/LJ046-0113.wav|pitch/LJ046-0113.pt|The Commission has concluded that at the time of the assassination wavs/LJ028-0410.wav|pitch/LJ028-0410.pt|There among the ruins they still live in the same kind of houses, wavs/LJ044-0137.wav|pitch/LJ044-0137.pt|More seriously, the facts of his defection had become known, leaving him open to almost unanswerable attack by those who opposed his views. wavs/LJ008-0215.wav|pitch/LJ008-0215.pt|One by one the huge uprights of black timber were fitted together, wavs/LJ030-0084.wav|pitch/LJ030-0084.pt|or when the press of the crowd made it impossible for the escort motorcycles to stay in position on the car's rear flanks. wavs/LJ020-0092.wav|pitch/LJ020-0092.pt|Have yourself called on biscuit mornings an hour earlier than usual. wavs/LJ029-0096.wav|pitch/LJ029-0096.pt|On November fourteen, Lawson and Sorrels attended a meeting at Love Field wavs/LJ015-0308.wav|pitch/LJ015-0308.pt|and others who swore to the meetings of the conspirators and their movements. Saward was found guilty, wavs/LJ012-0067.wav|pitch/LJ012-0067.pt|But Mrs. Solomons could not resist the temptation to dabble in stolen goods, and she was found shipping watches of the wrong category to New York. wavs/LJ018-0231.wav|pitch/LJ018-0231.pt|namely, to suppress it and substitute another. wavs/LJ014-0265.wav|pitch/LJ014-0265.pt|and later he became manager of the newly rebuilt Olympic at Wych Street. 
wavs/LJ024-0102.wav|pitch/LJ024-0102.pt|would be the first to exclaim as soon as an amendment was proposed wavs/LJ007-0233.wav|pitch/LJ007-0233.pt|it consists of several circular perforations, about two inches in diameter, wavs/LJ013-0213.wav|pitch/LJ013-0213.pt|This seems to have decided Courvoisier, wavs/LJ032-0045.wav|pitch/LJ032-0045.pt|This price included nineteen dollars, ninety-five cents for the rifle and the scope, and one dollar, fifty cents for postage and handling. wavs/LJ011-0048.wav|pitch/LJ011-0048.pt|Wherefore let him that thinketh he standeth take heed lest he fall," and was full of the most pointed allusions to the culprit. wavs/LJ005-0294.wav|pitch/LJ005-0294.pt|It was frequently stated in evidence that the jail of the borough was in so unfit a state for the reception of prisoners, wavs/LJ016-0007.wav|pitch/LJ016-0007.pt|There were others less successful. wavs/LJ028-0138.wav|pitch/LJ028-0138.pt|perhaps the tales that travelers told him were exaggerated as travelers' tales are likely to be, wavs/LJ050-0029.wav|pitch/LJ050-0029.pt|that is reflected in definite and comprehensive operating procedures. wavs/LJ014-0121.wav|pitch/LJ014-0121.pt|The prisoners were in due course transferred to Newgate, to be put upon their trial at the Central Criminal Court. wavs/LJ014-0146.wav|pitch/LJ014-0146.pt|They had to handcuff her by force against the most violent resistance, and still she raged and stormed, wavs/LJ046-0111.wav|pitch/LJ046-0111.pt|The Secret Service has attempted to perform this function through the activities of its Protective Research Section wavs/LJ012-0257.wav|pitch/LJ012-0257.pt|But the affair still remained a profound mystery. No light was thrown upon it till, towards the end of March, wavs/LJ002-0260.wav|pitch/LJ002-0260.pt|Yet the public opinion of the whole body seems to have checked dissipation. wavs/LJ031-0014.wav|pitch/LJ031-0014.pt|the Presidential limousine arrived at the emergency entrance of the Parkland Hospital at about twelve:thirty-five p.m. wavs/LJ047-0093.wav|pitch/LJ047-0093.pt|Oswald was arrested and jailed by the New Orleans Police Department for disturbing the peace, in connection with a street fight which broke out when he was accosted wavs/LJ003-0324.wav|pitch/LJ003-0324.pt|gaming of all sorts should be peremptorily forbidden under heavy pains and penalties. wavs/LJ021-0115.wav|pitch/LJ021-0115.pt|we have reached into the heart of the problem which is to provide such annual earnings for the lowest paid worker as will meet his minimum needs. wavs/LJ046-0191.wav|pitch/LJ046-0191.pt|it had established periodic regular review of the status of four hundred individuals; wavs/LJ034-0197.wav|pitch/LJ034-0197.pt|who was one of the first witnesses to alert the police to the Depository as the source of the shots, as has been discussed in chapter three. wavs/LJ002-0253.wav|pitch/LJ002-0253.pt|were governed by rules which they themselves had framed, and under which subscriptions were levied wavs/LJ048-0288.wav|pitch/LJ048-0288.pt|might have been more alert in the Dallas motorcade if they had retired promptly in Fort Worth. wavs/LJ007-0112.wav|pitch/LJ007-0112.pt|Many of the old customs once prevalent in the State Side, so properly condemned and abolished, wavs/LJ017-0189.wav|pitch/LJ017-0189.pt|who was presently attacked in the same way as the others, but, but, thanks to the prompt administration of remedies, he recovered. 
wavs/LJ042-0230.wav|pitch/LJ042-0230.pt|basically, although I hate the USSR and socialist system I still think marxism can work under different circumstances, end quote. wavs/LJ050-0161.wav|pitch/LJ050-0161.pt|The Secret Service should not and does not plan to develop its own intelligence gathering facilities to duplicate the existing facilities of other Federal agencies. wavs/LJ003-0011.wav|pitch/LJ003-0011.pt|that not more than one bottle of wine or one quart of beer could be issued at one time. No account was taken of the amount of liquors admitted in one day, wavs/LJ008-0206.wav|pitch/LJ008-0206.pt|and caused a number of stout additional barriers to be erected in front of the scaffold, wavs/LJ002-0261.wav|pitch/LJ002-0261.pt|The poorer prisoners were not in abject want, as in other prisons, wavs/LJ012-0189.wav|pitch/LJ012-0189.pt|Hunt, in consideration of the information he had given, escaped death, and was sentenced to transportation for life. wavs/LJ019-0317.wav|pitch/LJ019-0317.pt|The former, which consisted principally of the tread-wheel, cranks, capstans, shot-drill, wavs/LJ011-0041.wav|pitch/LJ011-0041.pt|Visited Mr. Fauntleroy. My application for books for him not having been attended, I had no prayer-book to give him. wavs/LJ023-0089.wav|pitch/LJ023-0089.pt|That is not only my accusation. wavs/LJ044-0224.wav|pitch/LJ044-0224.pt|would not agree with that particular wording, end quote. wavs/LJ013-0104.wav|pitch/LJ013-0104.pt|He found them at length residing at the latter place, one as a landed proprietor, the other as a publican. wavs/LJ013-0055.wav|pitch/LJ013-0055.pt|The jury did not believe him, and the verdict was for the defendants. wavs/LJ014-0306.wav|pitch/LJ014-0306.pt|These had been attributed to political action; some thought that the large purchases in foreign grains, effected at losing prices, wavs/LJ029-0052.wav|pitch/LJ029-0052.pt|To supplement the PRS files, the Secret Service depends largely on local police departments and local offices of other Federal agencies wavs/LJ028-0459.wav|pitch/LJ028-0459.pt|Its bricks, measuring about thirteen inches square and three inches in thickness, were burned and stamped with the usual short inscription: wavs/LJ017-0183.wav|pitch/LJ017-0183.pt|Soon afterwards Dixon died, showing all the symptoms already described. wavs/LJ009-0084.wav|pitch/LJ009-0084.pt|At length the ordinary pauses, and then, in a deep tone, which, though hardly above a whisper, is audible to all, says, wavs/LJ007-0170.wav|pitch/LJ007-0170.pt|That in this vast metropolis, the center of wealth, civilization, and information; wavs/LJ016-0277.wav|pitch/LJ016-0277.pt|This is proved by contemporary accounts, especially one graphic and realistic article which appeared in the 'Times,' wavs/LJ009-0061.wav|pitch/LJ009-0061.pt|He staggers towards the pew, reels into it, stumbles forward, flings himself on the ground, and, by a curious twist of the spine, wavs/LJ019-0201.wav|pitch/LJ019-0201.pt|to select a sufficiently spacious piece of ground, and erect a prison which from foundations to roofs should be in conformity with the newest ideas. wavs/LJ030-0063.wav|pitch/LJ030-0063.pt|He had repeated this wish only a few days before, during his visit to Tampa, Florida. wavs/LJ010-0257.wav|pitch/LJ010-0257.pt|a third miscreant made a similar but far less serious attempt in the month of July following. wavs/LJ009-0106.wav|pitch/LJ009-0106.pt|The keeper tries to appear unmoved, but his eye wanders anxiously over the combustible assembly. 
wavs/LJ008-0121.wav|pitch/LJ008-0121.pt|After the construction and action of the machine had been explained, the doctor asked the governor what kind of men he had commanded at Goree, wavs/LJ050-0069.wav|pitch/LJ050-0069.pt|the Secret Service had received from the FBI some nine thousand reports on members of the Communist Party. wavs/LJ006-0202.wav|pitch/LJ006-0202.pt|The news-vendor was also a tobacconist, wavs/LJ012-0230.wav|pitch/LJ012-0230.pt|Shortly before the day fixed for execution, Bishop made a full confession, the bulk of which bore the impress of truth, wavs/LJ005-0248.wav|pitch/LJ005-0248.pt|and stated that in his opinion Newgate, as the common jail of Middlesex, was wholly inadequate to the proper confinement of its prisoners. wavs/LJ037-0053.wav|pitch/LJ037-0053.pt|who had been greatly upset by her experience, was able to view a lineup of four men handcuffed together at the police station. wavs/LJ045-0177.wav|pitch/LJ045-0177.pt|For the first time wavs/LJ004-0036.wav|pitch/LJ004-0036.pt|it was hoped that their rulers would hire accommodation in the county prisons, and that the inferior establishments would in course of time disappear. wavs/LJ026-0054.wav|pitch/LJ026-0054.pt|carbohydrates (starch, cellulose) and fats. wavs/LJ020-0085.wav|pitch/LJ020-0085.pt|Break apart from one another and pile on a plate, throwing a clean doily or a small napkin over them. Break open at table. wavs/LJ046-0226.wav|pitch/LJ046-0226.pt|The several military intelligence agencies reported crank mail and similar threats involving the President. wavs/LJ014-0233.wav|pitch/LJ014-0233.pt|he shot an old soldier who had attempted to detain him. He was convicted and executed. wavs/LJ033-0152.wav|pitch/LJ033-0152.pt|The portion of the palm which was identified was the heel of the right palm, i.e., the area near the wrist, on the little finger side. wavs/LJ004-0009.wav|pitch/LJ004-0009.pt|as indefatigable and self-sacrificing, found by personal visitation that the condition of jails throughout the kingdom was, wavs/LJ017-0134.wav|pitch/LJ017-0134.pt|Within a few weeks occurred the Leeds poisoning case, in which the murderer undoubtedly was inspired by the facts made public at Palmer's trial. wavs/LJ019-0318.wav|pitch/LJ019-0318.pt|was to be the rule for all convicted prisoners throughout the early stages of their detention; wavs/LJ020-0093.wav|pitch/LJ020-0093.pt|Rise, wash face and hands, rinse the mouth out and brush back the hair. wavs/LJ012-0188.wav|pitch/LJ012-0188.pt|Probert was then admitted as a witness, and the case was fully proved against Thurtell, who was hanged in front of Hertford Jail. wavs/LJ019-0202.wav|pitch/LJ019-0202.pt|The preference given to the Pentonville system destroyed all hopes of a complete reformation of Newgate. wavs/LJ039-0027.wav|pitch/LJ039-0027.pt|Oswald's revolver wavs/LJ040-0176.wav|pitch/LJ040-0176.pt|He admitted to fantasies about being powerful and sometimes hurting and killing people, but refused to elaborate on them. wavs/LJ018-0354.wav|pitch/LJ018-0354.pt|Doubts were long entertained whether Thomas Wainwright, wavs/LJ031-0185.wav|pitch/LJ031-0185.pt|From the Presidential airplane, the Vice President telephoned Attorney General Robert F. Kennedy, wavs/LJ006-0137.wav|pitch/LJ006-0137.pt|They were not obliged to attend chapel, and seldom if ever went; "prisoners," said one of them under examination, "did not like the trouble of going to chapel." 
wavs/LJ032-0085.wav|pitch/LJ032-0085.pt|The Hidell signature on the notice of classification was in the handwriting of Oswald. wavs/LJ009-0037.wav|pitch/LJ009-0037.pt|the schoolmaster and the juvenile prisoners being seated round the communion-table, opposite the pulpit. wavs/LJ006-0021.wav|pitch/LJ006-0021.pt|Later on he had devoted himself to the personal investigation of the prisons of the United States. wavs/LJ006-0082.wav|pitch/LJ006-0082.pt|and this particular official took excellent care to select as residents for his own ward those most suitable from his own point of view. wavs/LJ016-0380.wav|pitch/LJ016-0380.pt|with hope to the last. There is always the chance of a flaw in the indictment, of a missing witness, or extenuating circumstances. wavs/LJ019-0344.wav|pitch/LJ019-0344.pt|monitor, or schoolmaster, nor to be engaged in the service of any officer of the prison. wavs/LJ019-0161.wav|pitch/LJ019-0161.pt|These disciplinary improvements were, however, only slowly and gradually introduced. wavs/LJ028-0145.wav|pitch/LJ028-0145.pt|And here I may not omit to tell the use to which the mould dug out of the great moat was turned, nor the manner wherein the wall was wrought. wavs/LJ018-0349.wav|pitch/LJ018-0349.pt|His disclaimer, distinct and detailed on every point, was intended simply for effect. wavs/LJ043-0010.wav|pitch/LJ043-0010.pt|Some of the members of that group saw a good deal of the Oswalds through the fall of nineteen sixty-three, wavs/LJ027-0178.wav|pitch/LJ027-0178.pt|These were undoubtedly perennibranchs. In the Permian and Triassic higher forms appeared, which were certainly caducibranch. wavs/LJ041-0070.wav|pitch/LJ041-0070.pt|He did not rise above the rank of private first class, even though he had passed a qualifying examination for the rank of corporal. wavs/LJ008-0266.wav|pitch/LJ008-0266.pt|Thus in the years between May first, eighteen twenty-seven, and thirtieth April, eighteen thirty-one, wavs/LJ021-0091.wav|pitch/LJ021-0091.pt|In this recent reorganization we have recognized three distinct functions: wavs/LJ019-0129.wav|pitch/LJ019-0129.pt|which marked the growth of public interest in prison affairs, and which was the germ of the new system wavs/LJ018-0215.wav|pitch/LJ018-0215.pt|William Roupell was the eldest but illegitimate son of a wealthy man who subsequently married Roupell's mother, and had further legitimate issue. wavs/LJ015-0194.wav|pitch/LJ015-0194.pt|and behaved so as to justify a belief that he had been a jail-bird all his life. wavs/LJ016-0137.wav|pitch/LJ016-0137.pt|that numbers of men, "lifers," and others with ten, fourteen, or twenty years to do, can be trusted to work out of doors without bolts and bars wavs/LJ002-0289.wav|pitch/LJ002-0289.pt|the latter raised eighteen pence among them to pay for a truss of straw for the poor woman to lie on. wavs/LJ023-0016.wav|pitch/LJ023-0016.pt|In nineteen thirty-three you and I knew that we must never let our economic system get completely out of joint again wavs/LJ011-0141.wav|pitch/LJ011-0141.pt|There were at the moment in Newgate six convicts sentenced to death for forging wills. wavs/LJ016-0283.wav|pitch/LJ016-0283.pt|to do them mere justice, there was at least till then a half-drunken ribald gaiety among the crowd that made them all akin." wavs/LJ035-0082.wav|pitch/LJ035-0082.pt|The only interval was the time necessary to ride in the elevator from the second to the sixth floor and walk back to the southeast corner. 
wavs/LJ045-0194.wav|pitch/LJ045-0194.pt|Anyone who was familiar with that area of Dallas would have known that the motorcade would probably pass the Texas School Book Depository to get from Main Street wavs/LJ009-0124.wav|pitch/LJ009-0124.pt|occupied when they saw it last, but a few hours ago, by their comrades who are now dead; wavs/LJ030-0162.wav|pitch/LJ030-0162.pt|In the Presidential Limousine wavs/LJ050-0223.wav|pitch/LJ050-0223.pt|The plan provides for an additional two hundred five agents for the Secret Service. Seventeen of this number are proposed for the Protective Research Section; wavs/LJ008-0228.wav|pitch/LJ008-0228.pt|their harsh and half-cracked voices full of maudlin, besotted sympathy for those about to die. wavs/LJ002-0096.wav|pitch/LJ002-0096.pt|The eight courts above enumerated were well supplied with water; wavs/LJ018-0288.wav|pitch/LJ018-0288.pt|After this the other conspirators traveled to obtain genuine bills and master the system of the leading houses at home and abroad. wavs/LJ002-0106.wav|pitch/LJ002-0106.pt|in which latterly a copper had been fixed for the cooking of provisions sent in by charitable persons. wavs/LJ025-0129.wav|pitch/LJ025-0129.pt|On each lobe of the bi-lobed leaf of Venus flytrap are three delicate filaments which stand out at right angles from the surface of the leaf. wavs/LJ044-0013.wav|pitch/LJ044-0013.pt|Hands Off Cuba, end quote, an application form for, and a membership card in, wavs/LJ049-0115.wav|pitch/LJ049-0115.pt|of the person who is actually in the exercise of the executive power, or wavs/LJ019-0145.wav|pitch/LJ019-0145.pt|But reformation was only skin deep. Below the surface many of the old evils still rankled. wavs/LJ019-0355.wav|pitch/LJ019-0355.pt|came up in all respects to modern requirements. wavs/LJ019-0289.wav|pitch/LJ019-0289.pt|There was unrestrained association of untried and convicted, juvenile with adult prisoners, vagrants, misdemeanants, felons. wavs/LJ048-0222.wav|pitch/LJ048-0222.pt|in Fort Worth, there occurred a breach of discipline by some members of the Secret Service who were officially traveling with the President. wavs/LJ016-0367.wav|pitch/LJ016-0367.pt|Under the new system the whole of the arrangements from first to last fell upon the officers. wavs/LJ047-0097.wav|pitch/LJ047-0097.pt|Agent Quigley did not know of Oswald's prior FBI record when he interviewed him, wavs/LJ007-0075.wav|pitch/LJ007-0075.pt|as effectually to rebuke and abash the profane spirit of the more insolent and daring of the criminals. wavs/LJ047-0022.wav|pitch/LJ047-0022.pt|provided by other agencies. wavs/LJ007-0085.wav|pitch/LJ007-0085.pt|at Newgate and York Castle as long as five years; "at Ilchester and Morpeth for seven years; at Warwick for eight years, wavs/LJ047-0075.wav|pitch/LJ047-0075.pt|Hosty had inquired earlier and found no evidence that it was functioning in the Dallas area. wavs/LJ008-0098.wav|pitch/LJ008-0098.pt|One was the "yeoman of the halter," a Newgate official, the executioner's assistant, whom Mr. J. T. Smith, who was present at the execution, wavs/LJ017-0102.wav|pitch/LJ017-0102.pt|The second attack was fatal, and ended in Cook's death from tetanus. wavs/LJ046-0105.wav|pitch/LJ046-0105.pt|Second, the adequacy of other advance preparations for the security of the President, during his visit to Dallas, wavs/LJ018-0206.wav|pitch/LJ018-0206.pt|He was a tall, slender man, with a long face and iron-gray hair. 
wavs/LJ012-0271.wav|pitch/LJ012-0271.pt|Whether it was greed or a quarrel that drove Greenacre to the desperate deed remains obscure. wavs/LJ005-0086.wav|pitch/LJ005-0086.pt|with such further separation as the justices should deem conducive to good order and discipline. wavs/LJ042-0097.wav|pitch/LJ042-0097.pt|and considerably better living quarters than those accorded to Soviet citizens of equal age and station. wavs/LJ047-0126.wav|pitch/LJ047-0126.pt|we would handle it in due course, in accord with the whole context of the investigation. End quote. wavs/LJ041-0022.wav|pitch/LJ041-0022.pt|Oswald first wrote, quote, Edward Vogel, end quote, an obvious misspelling of Voebel's name, wavs/LJ015-0025.wav|pitch/LJ015-0025.pt|The bank enjoyed an excellent reputation, it had a good connection, and was supposed to be perfectly sound. wavs/LJ012-0194.wav|pitch/LJ012-0194.pt|But Burke and Hare had their imitators further south, wavs/LJ028-0416.wav|pitch/LJ028-0416.pt|(if man may speak so confidently of His great impenetrable counsels), for an eternal Testimony of His great work in the confusion of Man's pride, wavs/LJ007-0130.wav|pitch/LJ007-0130.pt|are all huddled together without discrimination, oversight, or control." wavs/LJ015-0005.wav|pitch/LJ015-0005.pt|About this time Davidson and Gordon, the people above-mentioned, wavs/LJ016-0125.wav|pitch/LJ016-0125.pt|with this, placed against the wall near the chevaux-de-frise, he made an escalade. wavs/LJ014-0224.wav|pitch/LJ014-0224.pt|As Dwyer survived, Cannon escaped the death sentence, which was commuted to penal servitude for life. wavs/LJ005-0019.wav|pitch/LJ005-0019.pt|refuted by abundant evidence, and having no foundation whatever in truth. wavs/LJ042-0221.wav|pitch/LJ042-0221.pt|With either great ambivalence, or cold calculation he prepared completely different answers to the same questions. wavs/LJ001-0063.wav|pitch/LJ001-0063.pt|which was generally more formally Gothic than the printing of the German workmen, wavs/LJ030-0006.wav|pitch/LJ030-0006.pt|They took off in the Presidential plane, Air Force One, at eleven a.m., arriving at San Antonio at one:thirty p.m., Eastern Standard Time. wavs/LJ024-0054.wav|pitch/LJ024-0054.pt|democracy will have failed far beyond the importance to it of any king of precedent concerning the judiciary. wavs/LJ006-0044.wav|pitch/LJ006-0044.pt|the same callous indifference to the moral well-being of the prisoners, the same want of employment and of all disciplinary control. wavs/LJ039-0154.wav|pitch/LJ039-0154.pt|four point eight to five point six seconds if the second shot missed, wavs/LJ050-0090.wav|pitch/LJ050-0090.pt|they seem unduly restrictive in continuing to require some manifestation of animus against a Government official. wavs/LJ028-0421.wav|pitch/LJ028-0421.pt|it was the beginning of the great collections of Babylonian antiquities in the museums of the Western world. wavs/LJ033-0205.wav|pitch/LJ033-0205.pt|then I would say the possibility exists, these fibers could have come from this blanket, end quote. wavs/LJ019-0335.wav|pitch/LJ019-0335.pt|The books and journals he was to keep were minutely specified, and his constant presence in or near the jail was insisted upon. wavs/LJ013-0045.wav|pitch/LJ013-0045.pt|Wallace's relations warned him against his Liverpool friend, wavs/LJ037-0002.wav|pitch/LJ037-0002.pt|Chapter four. The Assassin: Part six. wavs/LJ018-0159.wav|pitch/LJ018-0159.pt|This was all the police wanted to know. 
wavs/LJ026-0140.wav|pitch/LJ026-0140.pt|In the plant as in the animal metabolism must consist of anabolic and catabolic processes. wavs/LJ014-0171.wav|pitch/LJ014-0171.pt|I will briefly describe one or two of the more remarkable murders in the years immediately following, then pass on to another branch of crime. wavs/LJ037-0007.wav|pitch/LJ037-0007.pt|Three others subsequently identified Oswald from a photograph. wavs/LJ033-0174.wav|pitch/LJ033-0174.pt|microscopic and UV (ultra violet) characteristics, end quote. wavs/LJ040-0110.wav|pitch/LJ040-0110.pt|he apparently adjusted well enough there to have had an average, although gradually deteriorating, school record wavs/LJ039-0192.wav|pitch/LJ039-0192.pt|he had a total of between four point eight and five point six seconds between the two shots which hit wavs/LJ032-0261.wav|pitch/LJ032-0261.pt|When he appeared before the Commission, Michael Paine lifted the blanket wavs/LJ040-0097.wav|pitch/LJ040-0097.pt|Lee was brought up in this atmosphere of constant money problems, and I am sure it had quite an effect on him, and also Robert, end quote. wavs/LJ037-0249.wav|pitch/LJ037-0249.pt|Mrs. Earlene Roberts, the housekeeper at Oswald's roominghouse and the last person known to have seen him before he reached tenth Street and Patton Avenue, wavs/LJ016-0248.wav|pitch/LJ016-0248.pt|Marwood was proud of his calling, and when questioned as to whether his process was satisfactory, replied that he heard "no complaints." wavs/LJ004-0083.wav|pitch/LJ004-0083.pt|As Mr. Buxton pointed out, many old acts of parliament designed to protect the prisoner were still in full force. wavs/LJ014-0029.wav|pitch/LJ014-0029.pt|This was Delarue's watch, fully identified as such, which Hocker told his brother Delarue had given him the morning of the murder. wavs/LJ021-0110.wav|pitch/LJ021-0110.pt|have been best calculated to promote industrial recovery and a permanent improvement of business and labor conditions. wavs/LJ003-0107.wav|pitch/LJ003-0107.pt|he slept in the same bed with a highwayman on one side, and a man charged with murder on the other. wavs/LJ039-0076.wav|pitch/LJ039-0076.pt|Ronald Simmons, chief of the U.S. Army Infantry Weapons Evaluation Branch of the Ballistics Research Laboratory, said, quote, wavs/LJ016-0347.wav|pitch/LJ016-0347.pt|had undoubtedly a solemn, impressive effect upon those outside. wavs/LJ001-0072.wav|pitch/LJ001-0072.pt|After the end of the fifteenth century the degradation of printing, especially in Germany and Italy, wavs/LJ024-0018.wav|pitch/LJ024-0018.pt|Consequently, although there never can be more than fifteen, there may be only fourteen, or thirteen, or twelve. wavs/LJ032-0180.wav|pitch/LJ032-0180.pt|that the fibers were caught in the crevice of the rifle's butt plate, quote, in the recent past, end quote, wavs/LJ010-0083.wav|pitch/LJ010-0083.pt|and measures taken to arrest them when their plans were so far developed that no doubt could remain as to their guilt. wavs/LJ002-0299.wav|pitch/LJ002-0299.pt|and gave the garnish for the common side at that sum, which is five shillings more than Mr. Neild says was extorted on the common side. wavs/LJ048-0143.wav|pitch/LJ048-0143.pt|the Secret Service did not at the time of the assassination have any established procedure governing its relationships with them. wavs/LJ012-0054.wav|pitch/LJ012-0054.pt|Solomons, while waiting to appear in court, persuaded the turnkeys to take him to a public-house, where all might "refresh." 
wavs/LJ019-0270.wav|pitch/LJ019-0270.pt|Vegetables, especially the potato, that most valuable anti-scorbutic, was too often omitted. wavs/LJ035-0164.wav|pitch/LJ035-0164.pt|three minutes after the shooting. wavs/LJ014-0326.wav|pitch/LJ014-0326.pt|Maltby and Co. would issue warrants on them deliverable to the importer, and the goods were then passed to be stored in neighboring warehouses. wavs/LJ001-0173.wav|pitch/LJ001-0173.pt|The essential point to be remembered is that the ornament, whatever it is, whether picture or pattern-work, should form part of the page, wavs/LJ050-0056.wav|pitch/LJ050-0056.pt|On December twenty-six, nineteen sixty-three, the FBI circulated additional instructions to all its agents, wavs/LJ003-0319.wav|pitch/LJ003-0319.pt|provided only that their security was not jeopardized, and dependent upon the enforcement of another new rule, wavs/LJ006-0040.wav|pitch/LJ006-0040.pt|The fact was that the years as they passed, nearly twenty in all, had worked but little permanent improvement in this detestable prison. wavs/LJ017-0231.wav|pitch/LJ017-0231.pt|His body was found lying in a pool of blood in a night-dress, stabbed over and over again in the left side. wavs/LJ017-0226.wav|pitch/LJ017-0226.pt|One half of the mutineers fell upon him unawares with handspikes and capstan-bars. wavs/LJ004-0239.wav|pitch/LJ004-0239.pt|He had been committed for an offense for which he was acquitted. wavs/LJ048-0112.wav|pitch/LJ048-0112.pt|The Commission also regards the security arrangements worked out by Lawson and Sorrels at Love Field as entirely adequate. wavs/LJ039-0125.wav|pitch/LJ039-0125.pt|that Oswald was a good shot, somewhat better than or equal to -- better than the average let us say. wavs/LJ030-0196.wav|pitch/LJ030-0196.pt|He cried out, quote, Oh, no, no, no. My God, they are going to kill us all, end quote, wavs/LJ010-0228.wav|pitch/LJ010-0228.pt|He was released from Broadmoor in eighteen seventy-eight, and went abroad. wavs/LJ045-0228.wav|pitch/LJ045-0228.pt|On the other hand, he could have traveled some distance with the money he did have and he did return to his room where he obtained his revolver. wavs/LJ028-0168.wav|pitch/LJ028-0168.pt|in the other was the sacred precinct of Jupiter Belus, wavs/LJ021-0140.wav|pitch/LJ021-0140.pt|and in such an effort we should be able to secure for employers and employees and consumers wavs/LJ009-0280.wav|pitch/LJ009-0280.pt|Again the wretched creature succeeded in obtaining foothold, but this time on the left side of the drop. wavs/LJ003-0159.wav|pitch/LJ003-0159.pt|To constitute this the aristocratic quarter, unwarrantable demands were made upon the space properly allotted to the female felons, wavs/LJ016-0274.wav|pitch/LJ016-0274.pt|and the windows of the opposite houses, which commanded a good view, as usual fetched high prices. wavs/LJ035-0014.wav|pitch/LJ035-0014.pt|it sounded high and I immediately kind of looked up, wavs/LJ033-0120.wav|pitch/LJ033-0120.pt|which he believed was where the bag reached when it was laid on the seat with one edge against the door. wavs/LJ045-0015.wav|pitch/LJ045-0015.pt|which Johnson said he did not receive until after the assassination. 
The letter said in part, quote, wavs/LJ003-0299.wav|pitch/LJ003-0299.pt|the latter end of the nineteenth century, several of which still fall far short of our English ideal, wavs/LJ032-0206.wav|pitch/LJ032-0206.pt|After comparing the rifle in the simulated photograph with the rifle in Exhibit Number one thirty-three A, Shaneyfelt testified, quote, wavs/LJ028-0494.wav|pitch/LJ028-0494.pt|Between the several sections were wide spaces where foot soldiers and charioteers might fight. wavs/LJ005-0099.wav|pitch/LJ005-0099.pt|and report at length upon the condition of the prisons of the country. wavs/LJ015-0144.wav|pitch/LJ015-0144.pt|developed to a colossal extent the frauds he had already practiced as a subordinate. wavs/LJ019-0221.wav|pitch/LJ019-0221.pt|It was intended as far as possible that, except awaiting trial, no prisoner should find himself relegated to Newgate. wavs/LJ003-0088.wav|pitch/LJ003-0088.pt|in one, for seven years -- that of a man sentenced to death, for whom great interest had been made, but whom it was not thought right to pardon. wavs/LJ045-0216.wav|pitch/LJ045-0216.pt|nineteen sixty-three, merely to disarm her and to provide a justification of sorts, wavs/LJ042-0135.wav|pitch/LJ042-0135.pt|that he was not yet twenty years old when he went to the Soviet Union with such high hopes and not quite twenty-three when he returned bitterly disappointed. wavs/LJ049-0196.wav|pitch/LJ049-0196.pt|On the other hand, it is urged that all features of the protection of the President and his family should be committed to an elite and independent corps. wavs/LJ018-0278.wav|pitch/LJ018-0278.pt|This was the well and astutely devised plot of the brothers Bidwell, wavs/LJ030-0238.wav|pitch/LJ030-0238.pt|and then looked around again and saw more of this movement, and so I proceeded to go to the back seat and get on top of him. wavs/LJ018-0309.wav|pitch/LJ018-0309.pt|where probably the money still remains. wavs/LJ041-0199.wav|pitch/LJ041-0199.pt|is shown most clearly by his employment relations after his return from the Soviet Union. Of course, he made his real problems worse to the extent wavs/LJ007-0076.wav|pitch/LJ007-0076.pt|The lax discipline maintained in Newgate was still further deteriorated by the presence of two other classes of prisoners who ought never to have been inmates of such a jail. wavs/LJ039-0118.wav|pitch/LJ039-0118.pt|He had high motivation. He had presumably a good to excellent rifle and good ammunition. wavs/LJ024-0019.wav|pitch/LJ024-0019.pt|And there may be only nine. wavs/LJ008-0085.wav|pitch/LJ008-0085.pt|The fire had not quite burnt out at twelve, in nearly four hours, that is to say. wavs/LJ018-0031.wav|pitch/LJ018-0031.pt|This fixed the crime pretty certainly upon Müller, who had already left the country, thus increasing suspicion under which he lay. wavs/LJ030-0032.wav|pitch/LJ030-0032.pt|Dallas police stood at intervals along the fence and Dallas plain clothes men mixed in the crowd. wavs/LJ050-0004.wav|pitch/LJ050-0004.pt|General Supervision of the Secret Service wavs/LJ039-0096.wav|pitch/LJ039-0096.pt|This is a definite advantage to the shooter, the vehicle moving directly away from him and the downgrade of the street, and he being in an elevated position wavs/LJ041-0195.wav|pitch/LJ041-0195.pt|Oswald's interest in Marxism led some people to avoid him, wavs/LJ047-0158.wav|pitch/LJ047-0158.pt|After a moment's hesitation, she told me that he worked at the Texas School Book Depository near the downtown area of Dallas. 
wavs/LJ050-0162.wav|pitch/LJ050-0162.pt|In planning its data processing techniques, wavs/LJ001-0051.wav|pitch/LJ001-0051.pt|and paying great attention to the "press work" or actual process of printing, wavs/LJ028-0136.wav|pitch/LJ028-0136.pt|Of all the ancient descriptions of the famous walls and the city they protected, that of Herodotus is the fullest. wavs/LJ034-0134.wav|pitch/LJ034-0134.pt|Shortly after the assassination Brennan noticed wavs/LJ019-0348.wav|pitch/LJ019-0348.pt|Every facility was promised. The sanction of the Secretary of State would not be withheld if plans and estimates were duly submitted, wavs/LJ010-0219.wav|pitch/LJ010-0219.pt|While one stood over the fire with the papers, another stood with lighted torch to fire the house. wavs/LJ011-0245.wav|pitch/LJ011-0245.pt|Mr. Mullay called again, taking with him five hundred pounds in cash. Howard discovered this, and his manner was very suspicious; wavs/LJ030-0035.wav|pitch/LJ030-0035.pt|Organization of the Motorcade wavs/LJ044-0135.wav|pitch/LJ044-0135.pt|While he had drawn some attention to himself and had actually appeared on two radio programs, he had been attacked by Cuban exiles and arrested, wavs/LJ045-0090.wav|pitch/LJ045-0090.pt|He was very much interested in autobiographical works of outstanding statesmen of the United States, to whom his wife thought he compared himself. wavs/LJ026-0034.wav|pitch/LJ026-0034.pt|When any given "protist" has to be classified the case must be decided on its individual merits; wavs/LJ045-0092.wav|pitch/LJ045-0092.pt|as to the fact that he was an outstanding man, end quote. wavs/LJ017-0050.wav|pitch/LJ017-0050.pt|Palmer, who was only thirty-one at the time of his trial, was in appearance short and stout, with a round head wavs/LJ036-0104.wav|pitch/LJ036-0104.pt|Whaley picked Oswald. wavs/LJ019-0055.wav|pitch/LJ019-0055.pt|High authorities were in favor of continuous separation. wavs/LJ010-0030.wav|pitch/LJ010-0030.pt|The brutal ferocity of the wild beast once aroused, the same means, the same weapons were employed to do the dreadful deed, wavs/LJ038-0047.wav|pitch/LJ038-0047.pt|Some of the officers saw Oswald strike McDonald with his fist. Most of them heard a click which they assumed to be a click of the hammer of the revolver. wavs/LJ009-0074.wav|pitch/LJ009-0074.pt|Let us pass on. wavs/LJ048-0069.wav|pitch/LJ048-0069.pt|Efforts made by the Bureau since the assassination, on the other hand, wavs/LJ003-0211.wav|pitch/LJ003-0211.pt|They were never left quite alone for fear of suicide, and for the same reason they were searched for weapons or poisons. wavs/LJ048-0053.wav|pitch/LJ048-0053.pt|It is the conclusion of the Commission that, even in the absence of Secret Service criteria wavs/LJ033-0093.wav|pitch/LJ033-0093.pt|Frazier estimated that the bag was two feet long, quote, give and take a few inches, end quote, and about five or six inches wide. wavs/LJ006-0149.wav|pitch/LJ006-0149.pt|The turnkeys left the prisoners very much to themselves, never entering the wards after locking-up time, at dusk, till unlocking next morning, wavs/LJ018-0211.wav|pitch/LJ018-0211.pt|The false coin was bought by an agent from an agent, and dealings were carried on secretly at the "Clock House" in Seven Dials. 
wavs/LJ008-0054.wav|pitch/LJ008-0054.pt|This contrivance appears to have been copied with improvements from that which had been used in Dublin at a still earlier date, wavs/LJ040-0052.wav|pitch/LJ040-0052.pt|that his commitment to Marxism was an important factor influencing his conduct during his adult years. wavs/LJ028-0023.wav|pitch/LJ028-0023.pt|Two weeks pass, and at last you stand on the eastern edge of the plateau wavs/LJ009-0184.wav|pitch/LJ009-0184.pt|Lord Ferrers' body was brought to Surgeons' Hall after execution in his own carriage and six; wavs/LJ005-0252.wav|pitch/LJ005-0252.pt|A committee was appointed, under the presidency of the Duke of Richmond wavs/LJ015-0266.wav|pitch/LJ015-0266.pt|has probably no parallel in the annals of crime. Saward himself is a striking and in some respects an unique figure in criminal history. wavs/LJ017-0059.wav|pitch/LJ017-0059.pt|even after sentence, and until within a few hours of execution, he was buoyed up with the hope of reprieve. wavs/LJ024-0034.wav|pitch/LJ024-0034.pt|What do they mean by the words "packing the Court"? wavs/LJ016-0089.wav|pitch/LJ016-0089.pt|He was engaged in whitewashing and cleaning; the officer who had him in charge left him on the stairs leading to the gallery. wavs/LJ039-0227.wav|pitch/LJ039-0227.pt|with two hits, within four point eight and five point six seconds. wavs/LJ001-0096.wav|pitch/LJ001-0096.pt|have now come into general use and are obviously a great improvement on the ordinary "modern style" in use in England, which is in fact the Bodoni type wavs/LJ018-0129.wav|pitch/LJ018-0129.pt|who threatened to betray the theft. But Brewer, either before or after this, succumbed to temptation, wavs/LJ010-0157.wav|pitch/LJ010-0157.pt|and that, as he was starving, he had resolved on this desperate deed, wavs/LJ038-0264.wav|pitch/LJ038-0264.pt|He concluded that, quote, the general rifling characteristics of the rifle are of the same type as those found on the bullet wavs/LJ031-0165.wav|pitch/LJ031-0165.pt|When security arrangements at the airport were complete, the Secret Service made the necessary arrangements for the Vice President to leave the hospital. wavs/LJ018-0244.wav|pitch/LJ018-0244.pt|The effect of establishing the forgeries would be to restore to the Roupell family lands for which a price had already been paid wavs/LJ007-0071.wav|pitch/LJ007-0071.pt|in the face of impediments confessedly discouraging wavs/LJ028-0340.wav|pitch/LJ028-0340.pt|Such of the Babylonians as witnessed the treachery took refuge in the temple of Jupiter Belus; wavs/LJ017-0164.wav|pitch/LJ017-0164.pt|with the idea of subjecting her to the irritant poison slowly but surely until the desired effect, death, was achieved. wavs/LJ048-0197.wav|pitch/LJ048-0197.pt|I then told the officers that their primary duty was traffic and crowd control and that they should be alert for any persons who might attempt to throw anything wavs/LJ013-0098.wav|pitch/LJ013-0098.pt|Mr. Oxenford having denied that he had made any transfer of stock, the matter was at once put into the hands of the police. wavs/LJ012-0049.wav|pitch/LJ012-0049.pt|led him to think seriously of trying his fortunes in another land. wavs/LJ030-0014.wav|pitch/LJ030-0014.pt|quote, that the crowd was about the same as the one which came to see him before but there were one hundred thousand extra people on hand who came to see Mrs. Kennedy. 
wavs/LJ014-0186.wav|pitch/LJ014-0186.pt|A milliner's porter, wavs/LJ015-0027.wav|pitch/LJ015-0027.pt|Yet even so early as the death of the first Sir John Paul, wavs/LJ047-0049.wav|pitch/LJ047-0049.pt|Marina Oswald, however, recalled that her husband was upset by this interview. wavs/LJ012-0021.wav|pitch/LJ012-0021.pt|at fourteen he was a pickpocket and a "duffer," or a seller of sham goods. wavs/LJ003-0140.wav|pitch/LJ003-0140.pt|otherwise he would have been stripped of his clothes. End quote. wavs/LJ042-0130.wav|pitch/LJ042-0130.pt|Shortly thereafter, less than eighteen months after his defection, about six weeks before he met Marina Prusakova, wavs/LJ019-0180.wav|pitch/LJ019-0180.pt|His letter to the Corporation, under date fourth June, wavs/LJ017-0108.wav|pitch/LJ017-0108.pt|He was struck with the appearance of the corpse, which was not emaciated, as after a long disease ending in death; wavs/LJ006-0268.wav|pitch/LJ006-0268.pt|Women saw men if they merely pretended to be wives; even boys were visited by their sweethearts. wavs/LJ044-0125.wav|pitch/LJ044-0125.pt|of residence in the U.S.S.R. against any cause which I join, by association, wavs/LJ015-0231.wav|pitch/LJ015-0231.pt|It was Tester's business, who had access to the railway company's books, to watch for this. wavs/LJ002-0225.wav|pitch/LJ002-0225.pt|The rentals of rooms and fees went to the warden, whose income was two thousand three hundred seventy-two pounds. wavs/LJ034-0072.wav|pitch/LJ034-0072.pt|The employees raced the elevators to the first floor. Givens saw Oswald standing at the gate on the fifth floor as the elevator went by. wavs/LJ045-0033.wav|pitch/LJ045-0033.pt|He began to treat me better. He helped me more -- although he always did help. But he was more attentive, end quote. wavs/LJ031-0058.wav|pitch/LJ031-0058.pt|to infuse blood and fluids into the circulatory system. wavs/LJ029-0197.wav|pitch/LJ029-0197.pt|During November the Dallas papers reported frequently on the plans for protecting the President, stressing the thoroughness of the preparations. wavs/LJ043-0047.wav|pitch/LJ043-0047.pt|Oswald and his family lived for a brief period with his mother at her urging, but Oswald soon decided to move out. wavs/LJ021-0026.wav|pitch/LJ021-0026.pt|seems necessary to produce the same result of justice and right conduct wavs/LJ003-0230.wav|pitch/LJ003-0230.pt|The prison allowances were eked out by the broken victuals generously given by several eating-house keepers in the city, wavs/LJ037-0252.wav|pitch/LJ037-0252.pt|Ted Callaway, who saw the gunman moments after the shooting, testified that Commission Exhibit Number one sixty-two wavs/LJ031-0008.wav|pitch/LJ031-0008.pt|Meanwhile, Chief Curry ordered the police base station to notify Parkland Hospital that the wounded President was en route. wavs/LJ030-0021.wav|pitch/LJ030-0021.pt|all one had to do was get a high building someday with a telescopic rifle, and there was nothing anybody could do to defend against such an attempt. wavs/LJ046-0179.wav|pitch/LJ046-0179.pt|being reviewed regularly. wavs/LJ025-0118.wav|pitch/LJ025-0118.pt|and that, however diverse may be the fabrics or tissues of which their bodies are composed, all these varied structures result wavs/LJ028-0278.wav|pitch/LJ028-0278.pt|Zopyrus, when they told him, not thinking that it could be true, went and saw the colt with his own eyes; wavs/LJ007-0090.wav|pitch/LJ007-0090.pt|Not only did their presence tend greatly to interfere with the discipline of the prison, but their condition was deplorable in the extreme. 
wavs/LJ045-0045.wav|pitch/LJ045-0045.pt|that she would be able to leave the Soviet Union. Marina Oswald has denied this. wavs/LJ028-0289.wav|pitch/LJ028-0289.pt|For he cut off his own nose and ears, and then, clipping his hair close and flogging himself with a scourge, wavs/LJ009-0276.wav|pitch/LJ009-0276.pt|Calcraft, the moment he had adjusted the cap and rope, ran down the steps, drew the bolt, and disappeared. wavs/LJ031-0122.wav|pitch/LJ031-0122.pt|treated the gunshot wound in the left thigh. wavs/LJ016-0205.wav|pitch/LJ016-0205.pt|he received a retaining fee of five pounds, five shillings, with the usual guinea for each job; wavs/LJ019-0248.wav|pitch/LJ019-0248.pt|leading to an inequality, uncertainty, and inefficiency of punishment productive of the most prejudicial results. wavs/LJ033-0183.wav|pitch/LJ033-0183.pt|it was not surprising that the replica sack made on December one, nineteen sixty-three, wavs/LJ037-0001.wav|pitch/LJ037-0001.pt|Report of the President's Commission on the Assassination of President Kennedy. The Warren Commission Report. By The President's Commission on the Assassination of President Kennedy. wavs/LJ018-0218.wav|pitch/LJ018-0218.pt|In eighteen fifty-five wavs/LJ001-0102.wav|pitch/LJ001-0102.pt|Here and there a book is printed in France or Germany with some pretension to good taste, wavs/LJ007-0125.wav|pitch/LJ007-0125.pt|It was diverted from its proper uses, and, as the "place of the greatest comfort," was allotted to persons who should not have been sent to Newgate at all. wavs/LJ050-0022.wav|pitch/LJ050-0022.pt|A formal and thorough description of the responsibilities of the advance agent is now in preparation by the Service. wavs/LJ028-0212.wav|pitch/LJ028-0212.pt|On the night of the eleventh day Gobrias killed the son of the King. wavs/LJ028-0357.wav|pitch/LJ028-0357.pt|yet we may be sure that Babylon was taken by Darius only by use of stratagem. Its walls were impregnable. wavs/LJ014-0199.wav|pitch/LJ014-0199.pt|there was no case to make out; why waste money on lawyers for the defense? His demeanor was cool and collected throughout; wavs/LJ016-0077.wav|pitch/LJ016-0077.pt|A man named Lears, under sentence of transportation for an attempt at murder on board ship, got up part of the way, wavs/LJ009-0194.wav|pitch/LJ009-0194.pt|and that executors or persons having lawful possession of the bodies wavs/LJ014-0094.wav|pitch/LJ014-0094.pt|Discovery of the murder came in this wise. O'Connor, a punctual and well-conducted official, was at once missed at the London Docks. wavs/LJ001-0079.wav|pitch/LJ001-0079.pt|Caslon's type is clear and neat, and fairly well designed; wavs/LJ026-0052.wav|pitch/LJ026-0052.pt|In the nutrition of the animal the most essential and characteristic part of the food supply is derived from vegetable wavs/LJ013-0005.wav|pitch/LJ013-0005.pt|One of the earliest of the big operators in fraudulent finance was Edward Beaumont Smith, wavs/LJ033-0072.wav|pitch/LJ033-0072.pt|I then stepped off of it and the officer picked it up in the middle and it bent so. wavs/LJ036-0067.wav|pitch/LJ036-0067.pt|According to McWatters, the Beckley bus was behind the Marsalis bus, but he did not actually see it. wavs/LJ025-0098.wav|pitch/LJ025-0098.pt|and it is probable that amyloid substances are universally present in the animal organism, though not in the precise form of starch. 
wavs/LJ005-0257.wav|pitch/LJ005-0257.pt|during which time a host of witnesses were examined, and the committee presented three separate reports, wavs/LJ004-0024.wav|pitch/LJ004-0024.pt|Thus in eighteen thirteen the exaction of jail fees had been forbidden by law, wavs/LJ049-0154.wav|pitch/LJ049-0154.pt|In eighteen ninety-four, wavs/LJ039-0059.wav|pitch/LJ039-0059.pt|(three) his experience and practice after leaving the Marine Corps, and (four) the accuracy of the weapon and the quality of the ammunition. wavs/LJ007-0150.wav|pitch/LJ007-0150.pt|He is allowed intercourse with prostitutes who, in nine cases out of ten, have originally conduced to his ruin; wavs/LJ015-0001.wav|pitch/LJ015-0001.pt|Chronicles of Newgate, Volume two. By Arthur Griffiths. Section eighteen: Newgate notorieties continued, part three. wavs/LJ010-0158.wav|pitch/LJ010-0158.pt|feeling, as he said, that he might as well be shot or hanged as remain in such a state. wavs/LJ010-0281.wav|pitch/LJ010-0281.pt|who had borne the Queen's commission, first as cornet, and then lieutenant, in the tenth Hussars. wavs/LJ033-0055.wav|pitch/LJ033-0055.pt|and he could disassemble it more rapidly. wavs/LJ015-0218.wav|pitch/LJ015-0218.pt|A new accomplice was now needed within the company's establishment, and Pierce looked about long before he found the right person. wavs/LJ027-0006.wav|pitch/LJ027-0006.pt|In all these lines the facts are drawn together by a strong thread of unity. wavs/LJ016-0049.wav|pitch/LJ016-0049.pt|He had here completed his ascent. wavs/LJ006-0088.wav|pitch/LJ006-0088.pt|It was not likely that a system which left innocent men -- for the great bulk of new arrivals were still untried wavs/LJ042-0133.wav|pitch/LJ042-0133.pt|a great change must have occurred in Oswald's thinking to induce him to return to the United States. wavs/LJ045-0234.wav|pitch/LJ045-0234.pt|While he did become enraged at at least one point in his interrogation, wavs/LJ046-0033.wav|pitch/LJ046-0033.pt|The adequacy of existing procedures can fairly be assessed only after full consideration of the difficulty of the protective assignment, wavs/LJ037-0061.wav|pitch/LJ037-0061.pt|and having, quote, somewhat bushy, end quote, hair. wavs/LJ032-0025.wav|pitch/LJ032-0025.pt|the officers of Klein's discovered that a rifle bearing serial number C two seven six six had been shipped to one A. Hidell, wavs/LJ047-0197.wav|pitch/LJ047-0197.pt|in view of all the information concerning Oswald in its files, should have alerted the Secret Service to Oswald's presence in Dallas wavs/LJ018-0130.wav|pitch/LJ018-0130.pt|and stole paper on a much larger scale than Brown. wavs/LJ005-0265.wav|pitch/LJ005-0265.pt|It was recommended that the dietaries should be submitted and approved like the rules; that convicted prisoners should not receive any food but the jail allowance; wavs/LJ044-0105.wav|pitch/LJ044-0105.pt|He presented Arnold Johnson, Gus Hall, wavs/LJ015-0043.wav|pitch/LJ015-0043.pt|This went on for some time, and might never have been discovered had some good stroke of luck provided any of the partners wavs/LJ030-0125.wav|pitch/LJ030-0125.pt|On several occasions when the Vice President's car was slowed down by the throng, Special Agent Youngblood stepped out to hold the crowd back. wavs/LJ043-0140.wav|pitch/LJ043-0140.pt|He also studied Dallas bus schedules to prepare for his later use of buses to travel to and from General Walker's house. 
wavs/LJ002-0220.wav|pitch/LJ002-0220.pt|In consequence of these disclosures, both Bambridge and Huggin, his predecessor in the office, were committed to Newgate, wavs/LJ034-0117.wav|pitch/LJ034-0117.pt|At one:twenty-nine p.m. the police radio reported wavs/LJ018-0276.wav|pitch/LJ018-0276.pt|The first plot was against Mr. Harry Emmanuel, but he escaped, and the attempt was made upon Loudon and Ryder. wavs/LJ004-0077.wav|pitch/LJ004-0077.pt|nor has he a right to poison or starve his fellow-creatures." wavs/LJ042-0194.wav|pitch/LJ042-0194.pt|they should not be confused with slowness, indecision or fear. Only the intellectually fearless could even be remotely attracted to our doctrine, wavs/LJ029-0114.wav|pitch/LJ029-0114.pt|The route chosen from the airport to Main Street was the normal one, except where Harwood Street was selected as the means of access to Main Street wavs/LJ014-0194.wav|pitch/LJ014-0194.pt|The policemen were now in possession; wavs/LJ032-0027.wav|pitch/LJ032-0027.pt|According to its microfilm records, Klein's received an order for a rifle on March thirteen, nineteen sixty-three, wavs/LJ048-0289.wav|pitch/LJ048-0289.pt|However, there is no evidence that these men failed to take any action in Dallas within their power that would have averted the tragedy. wavs/LJ043-0188.wav|pitch/LJ043-0188.pt|that he was the leader of a fascist organization, and when I said that even though all of that might be true, just the same he had no right to take his life, wavs/LJ011-0118.wav|pitch/LJ011-0118.pt|In eighteen twenty-nine the gallows claimed two more victims for this offense. wavs/LJ040-0201.wav|pitch/LJ040-0201.pt|After her interview with Mrs. Oswald, wavs/LJ033-0056.wav|pitch/LJ033-0056.pt|While the rifle may have already been disassembled when Oswald arrived home on Thursday, he had ample time that evening to disassemble the rifle wavs/LJ047-0073.wav|pitch/LJ047-0073.pt|Hosty considered the information to be, quote, stale, unquote, by that time, and did not attempt to verify Oswald's reported statement. wavs/LJ001-0153.wav|pitch/LJ001-0153.pt|only nominally so, however, in many cases, since when he uses a headline he counts that in, wavs/LJ007-0158.wav|pitch/LJ007-0158.pt|or any kind of moral improvement was impossible; the prisoner's career was inevitably downward, till he struck the lowest depths. wavs/LJ028-0502.wav|pitch/LJ028-0502.pt|The Ishtar gateway leading to the palace was encased with beautiful blue glazed bricks, wavs/LJ028-0226.wav|pitch/LJ028-0226.pt|Though Herodotus wrote nearly a hundred years after Babylon fell, his story seems to bear the stamp of truth. wavs/LJ010-0038.wav|pitch/LJ010-0038.pt|as there had been before; as in the year eighteen forty-nine, a year memorable for the Rush murders at Norwich, wavs/LJ019-0241.wav|pitch/LJ019-0241.pt|But in the interval very comprehensive and, I think it must be admitted, salutary changes were successively introduced into the management of prisons. wavs/LJ001-0094.wav|pitch/LJ001-0094.pt|were induced to cut punches for a series of "old style" letters. wavs/LJ001-0015.wav|pitch/LJ001-0015.pt|the forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. wavs/LJ047-0015.wav|pitch/LJ047-0015.pt|From defection to return to Fort Worth. wavs/LJ044-0139.wav|pitch/LJ044-0139.pt|since there was no background to the New Orleans FPCC, quote, organization, end quote, which consisted solely of Oswald. 
wavs/LJ050-0031.wav|pitch/LJ050-0031.pt|that the Secret Service consciously set about the task of inculcating and maintaining the highest standard of excellence and esprit, for all of its personnel. wavs/LJ050-0235.wav|pitch/LJ050-0235.pt|It has also used other Federal law enforcement agents during Presidential visits to cities in which such agents are stationed. wavs/LJ050-0137.wav|pitch/LJ050-0137.pt|FBI, and the Secret Service. wavs/LJ031-0109.wav|pitch/LJ031-0109.pt|At one:thirty-five p.m., after Governor Connally had been moved to the operating room, Dr. Shaw started the first operation wavs/LJ031-0041.wav|pitch/LJ031-0041.pt|He noted that the President was blue-white or ashen in color; had slow, spasmodic, agonal respiration without any coordination; wavs/LJ021-0139.wav|pitch/LJ021-0139.pt|There should be at least a full and fair trial given to these means of ending industrial warfare; wavs/LJ029-0004.wav|pitch/LJ029-0004.pt|The narrative of these events is based largely on the recollections of the participants, wavs/LJ023-0122.wav|pitch/LJ023-0122.pt|It was said in last year's Democratic platform, wavs/LJ005-0264.wav|pitch/LJ005-0264.pt|inspectors of prisons should be appointed, who should visit all the prisons from time to time and report to the Secretary of State. wavs/LJ002-0105.wav|pitch/LJ002-0105.pt|and beyond it was a room called the "wine room," because formerly used for the sale of wine, but wavs/LJ017-0035.wav|pitch/LJ017-0035.pt|in the interests and for the due protection of the public, that the fullest and fairest inquiry should be made, wavs/LJ048-0252.wav|pitch/LJ048-0252.pt|Three of these agents occupied positions on the running boards of the car, and the fourth was seated in the car. wavs/LJ013-0109.wav|pitch/LJ013-0109.pt|The proceeds of the robbery were lodged in a Boston bank, wavs/LJ039-0139.wav|pitch/LJ039-0139.pt|Oswald obtained a hunting license, joined a hunting club and went hunting about six times, as discussed more fully in chapter six. wavs/LJ044-0047.wav|pitch/LJ044-0047.pt|that anyone ever attacked any street demonstration in which Oswald was involved, except for the Bringuier incident mentioned above, wavs/LJ016-0417.wav|pitch/LJ016-0417.pt|Catherine Wilson, the poisoner, was reserved and reticent to the last, expressing no contrition, but also no fear -- wavs/LJ045-0178.wav|pitch/LJ045-0178.pt|he left his wedding ring in a cup on the dresser in his room. He also left one hundred seventy dollars in a wallet in one of the dresser drawers. wavs/LJ009-0172.wav|pitch/LJ009-0172.pt|While in London, for instance, in eighteen twenty-nine, twenty-four persons had been executed for crimes other than murder, wavs/LJ049-0202.wav|pitch/LJ049-0202.pt|incident to its responsibilities. wavs/LJ032-0103.wav|pitch/LJ032-0103.pt|The name "Hidell" was stamped on some of the "Chapter's" printed literature and on the membership application blanks. wavs/LJ013-0091.wav|pitch/LJ013-0091.pt|and Elder had to be assisted by two bank porters, who carried it for him to a carriage waiting near the Mansion House. wavs/LJ037-0208.wav|pitch/LJ037-0208.pt|nineteen dollars, ninety-five cents, plus one dollar, twenty-seven cents shipping charge, had been collected from the consignee, Hidell. wavs/LJ014-0128.wav|pitch/LJ014-0128.pt|her hair was dressed in long crepe bands. She had lace ruffles at her wrist, and wore primrose-colored kid gloves. 
wavs/LJ015-0007.wav|pitch/LJ015-0007.pt|This affected Cole's credit, and ugly reports were in circulation charging him with the issue of simulated warrants. wavs/LJ036-0169.wav|pitch/LJ036-0169.pt|he would have reached his destination at approximately twelve:fifty-four p.m. wavs/LJ021-0040.wav|pitch/LJ021-0040.pt|The second step we have taken in the restoration of normal business enterprise wavs/LJ015-0036.wav|pitch/LJ015-0036.pt|The bank was already insolvent, wavs/LJ034-0041.wav|pitch/LJ034-0041.pt|Although Bureau experiments had shown that twenty-four hours was a likely maximum time, Latona stated wavs/LJ009-0192.wav|pitch/LJ009-0192.pt|The dissection of executed criminals was abolished soon after the discovery of the crime of burking, wavs/LJ037-0248.wav|pitch/LJ037-0248.pt|The eyewitnesses vary in their identification of the jacket. wavs/LJ015-0289.wav|pitch/LJ015-0289.pt|As each transaction was carried out from a different address, and a different messenger always employed, wavs/LJ005-0072.wav|pitch/LJ005-0072.pt|After a few years of active exertion the Society was rewarded by fresh legislation. wavs/LJ023-0047.wav|pitch/LJ023-0047.pt|The three horses are, of course, the three branches of government -- the Congress, the Executive and the courts. wavs/LJ009-0126.wav|pitch/LJ009-0126.pt|Hardly any one. wavs/LJ034-0097.wav|pitch/LJ034-0097.pt|The window was approximately one hundred twenty feet away. wavs/LJ028-0462.wav|pitch/LJ028-0462.pt|They were laid in bitumen. wavs/LJ046-0055.wav|pitch/LJ046-0055.pt|It is now possible for Presidents to travel the length and breadth of a land far larger than the United States wavs/LJ019-0371.wav|pitch/LJ019-0371.pt|Yet the law was seldom if ever enforced. wavs/LJ039-0207.wav|pitch/LJ039-0207.pt|Although all of the shots were a few inches high and to the right of the target, wavs/LJ002-0174.wav|pitch/LJ002-0174.pt|Mr. Buxton's friends at once paid the forty shillings, and the boy was released. wavs/LJ016-0233.wav|pitch/LJ016-0233.pt|In his own profession wavs/LJ026-0108.wav|pitch/LJ026-0108.pt|It is clear that there are upward and downward currents of water containing food (comparable to blood of an animal), wavs/LJ038-0035.wav|pitch/LJ038-0035.pt|Oswald rose from his seat, bringing up both hands. wavs/LJ026-0148.wav|pitch/LJ026-0148.pt|water which is lost by evaporation, especially from the leaf surface through the stomata; wavs/LJ001-0186.wav|pitch/LJ001-0186.pt|the position of our Society that a work of utility might be also a work of art, if we cared to make it so. wavs/LJ016-0264.wav|pitch/LJ016-0264.pt|The upturned faces of the eager spectators resembled those of the 'gods' at Drury Lane on Boxing Night; wavs/LJ009-0041.wav|pitch/LJ009-0041.pt|The occupants of this terrible black pew were the last always to enter the chapel. wavs/LJ010-0297.wav|pitch/LJ010-0297.pt|But there were other notorious cases of forgery. wavs/LJ040-0018.wav|pitch/LJ040-0018.pt|the Commission is not able to reach any definite conclusions as to whether or not he was, quote, sane, unquote, under prevailing legal standards. wavs/LJ005-0253.wav|pitch/LJ005-0253.pt|"to inquire into and report upon the several jails and houses of correction in the counties, cities, and corporate towns within England and Wales wavs/LJ027-0176.wav|pitch/LJ027-0176.pt|Fishes first appeared in the Devonian and Upper Silurian in very reptilian or rather amphibian forms. 
wavs/LJ034-0035.wav|pitch/LJ034-0035.pt|The position of this palmprint on the carton was parallel with the long axis of the box, and at right angles with the short axis; wavs/LJ016-0054.wav|pitch/LJ016-0054.pt|But he did not like the risk of entering a room by the fireplace, and the chances of detection it offered. wavs/LJ018-0262.wav|pitch/LJ018-0262.pt|Roupell received the announcement with a cheerful countenance, wavs/LJ044-0237.wav|pitch/LJ044-0237.pt|with thirteen dollars, eighty-seven cents when considerably greater resources were available to him. wavs/LJ034-0166.wav|pitch/LJ034-0166.pt|Two other witnesses were able to offer partial descriptions of a man they saw in the southeast corner window wavs/LJ016-0238.wav|pitch/LJ016-0238.pt|"just to steady their legs a little;" in other words, to add his weight to that of the hanging bodies. wavs/LJ042-0198.wav|pitch/LJ042-0198.pt|The discussion above has already set forth examples of his expression of hatred for the United States. wavs/LJ031-0189.wav|pitch/LJ031-0189.pt|At two:thirty-eight p.m., Eastern Standard Time, Lyndon Baines Johnson took the oath of office as the thirty-sixth President of the United States. wavs/LJ050-0084.wav|pitch/LJ050-0084.pt|or, quote, other high government officials in the nature of a complaint coupled with an expressed or implied determination to use a means, wavs/LJ044-0158.wav|pitch/LJ044-0158.pt|As for my return entrance visa please consider it separately. End quote. wavs/LJ045-0082.wav|pitch/LJ045-0082.pt|it appears that Marina Oswald also complained that her husband was not able to provide more material things for her. wavs/LJ045-0190.wav|pitch/LJ045-0190.pt|appeared in The Dallas Times Herald on November fifteen, nineteen sixty-three. wavs/LJ035-0155.wav|pitch/LJ035-0155.pt|The only exit from the office in the direction Oswald was moving was through the door to the front stairway. wavs/LJ044-0004.wav|pitch/LJ044-0004.pt|Political Activities wavs/LJ046-0016.wav|pitch/LJ046-0016.pt|The Commission has not undertaken a comprehensive examination of all facets of this subject; wavs/LJ019-0368.wav|pitch/LJ019-0368.pt|The latter too was to be laid before the House of Commons. wavs/LJ010-0062.wav|pitch/LJ010-0062.pt|But they proceeded in all seriousness, and would have shrunk from no outrage or atrocity in furtherance of their foolhardy enterprise. wavs/LJ033-0159.wav|pitch/LJ033-0159.pt|It was from Oswald's right hand, in which he carried the long package as he walked from Frazier's car to the building. wavs/LJ002-0171.wav|pitch/LJ002-0171.pt|The boy declared he saw no one, and accordingly passed through without paying the toll of a penny. wavs/LJ002-0298.wav|pitch/LJ002-0298.pt|in his evidence in eighteen fourteen, said it was more, wavs/LJ012-0219.wav|pitch/LJ012-0219.pt|and in one corner, at some depth, a bundle of clothes were unearthed, which, with a hairy cap, wavs/LJ017-0190.wav|pitch/LJ017-0190.pt|After this came the charge of administering oil of vitriol, which failed, as has been described. wavs/LJ019-0179.wav|pitch/LJ019-0179.pt|This, with a scheme for limiting the jail to untried prisoners, had been urgently recommended by Lord John Russell in eighteen thirty. wavs/LJ050-0188.wav|pitch/LJ050-0188.pt|each patrolman might be given a prepared booklet of instructions explaining what is expected of him. 
The Secret Service has expressed concern wavs/LJ006-0043.wav|pitch/LJ006-0043.pt|The disgraceful overcrowding had been partially ended, but the same evils of indiscriminate association were still present; there was the old neglect of decency, wavs/LJ029-0060.wav|pitch/LJ029-0060.pt|A number of people who resembled some of those in the photographs were placed under surveillance at the Trade Mart. wavs/LJ019-0052.wav|pitch/LJ019-0052.pt|Both systems came to us from the United States. The difference was really more in degree than in principle, wavs/LJ037-0081.wav|pitch/LJ037-0081.pt|Later in the day each woman found an empty shell on the ground near the house. These two shells were delivered to the police. wavs/LJ048-0200.wav|pitch/LJ048-0200.pt|paying particular attention to the crowd for any unusual activity. wavs/LJ016-0426.wav|pitch/LJ016-0426.pt|come along, gallows. wavs/LJ008-0182.wav|pitch/LJ008-0182.pt|A tremendous crowd assembled when Bellingham was executed in eighteen twelve for the murder of Spencer Percival, at that time prime minister; wavs/LJ043-0107.wav|pitch/LJ043-0107.pt|Upon moving to New Orleans on April twenty-four, nineteen sixty-three, wavs/LJ006-0084.wav|pitch/LJ006-0084.pt|and so numerous were his opportunities of showing favoritism, that all the prisoners may be said to be in his power. wavs/LJ025-0081.wav|pitch/LJ025-0081.pt|has no permanent digestive cavity or mouth, but takes in its food anywhere and digests, so to speak, all over its body. wavs/LJ019-0042.wav|pitch/LJ019-0042.pt|These were either satisfied with a makeshift, and modified existing buildings, without close regard to their suitability, or for a long time did nothing at all. wavs/LJ047-0240.wav|pitch/LJ047-0240.pt|They agree that Hosty told Revill wavs/LJ032-0012.wav|pitch/LJ032-0012.pt|the resistance to arrest and the attempted shooting of another police officer by the man (Lee Harvey Oswald) subsequently accused of assassinating President Kennedy wavs/LJ050-0209.wav|pitch/LJ050-0209.pt|The assistant to the Director of the FBI testified that
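The filelist rows above follow FastPitch's pipe-delimited manifest convention: each record is "audio path|pitch tensor path|normalized transcript" on a line of its own. Below is a minimal parsing sketch under that assumption only; the names FilelistEntry and parse_filelist and the example filename are illustrative and are not taken from the repository.

# Minimal sketch (not repository code): parse a FastPitch-style filelist whose
# records look like "wavs/LJ001-0001.wav|pitch/LJ001-0001.pt|<transcript>".
from typing import List, NamedTuple


class FilelistEntry(NamedTuple):
    audio_path: str   # relative path to the .wav file
    pitch_path: str   # relative path to the precomputed pitch tensor (.pt)
    text: str         # normalized transcript used for training


def parse_filelist(path: str) -> List[FilelistEntry]:
    """Return one FilelistEntry per non-empty line of a wav|pitch|text manifest."""
    entries = []
    with open(path, encoding="utf-8") as manifest:
        for line in manifest:
            line = line.strip()
            if not line:
                continue
            # maxsplit=2 keeps any '|' characters inside the transcript intact.
            audio_path, pitch_path, text = line.split("|", 2)
            entries.append(FilelistEntry(audio_path, pitch_path, text))
    return entries


if __name__ == "__main__":
    # Hypothetical filename; actual filelist names in the repository may differ.
    for entry in parse_filelist("filelists/ljs_audio_pitch_text_val.txt")[:3]:
        print(entry.audio_path, "->", entry.text[:40])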
TensorFlow2/LanguageModeling/ELECTRA
ELECTRA
run_tf_squad
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import subprocess
import time
import argparse
import json
import logging

import tensorflow as tf
import horovod.tensorflow as hvd
from horovod.tensorflow.compression import Compression
from gpu_affinity import set_affinity

if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle

from tqdm import tqdm
import dllogger
from utils import is_main_process, format_step, get_rank, get_world_size, log
from configuration import ElectraConfig
from modeling import TFElectraForQuestionAnswering
from tokenization import ElectraTokenizer
from optimization import create_optimizer
from squad_utils import SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features, \
    SquadResult, RawResult, get_answers

TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/electra-small-generator",
    "google/electra-base-generator",
    "google/electra-large-generator",
    "google/electra-small-discriminator",
    "google/electra-base-discriminator",
    "google/electra-large-discriminator",
    # See all ELECTRA models at https://huggingface.co/models?filter=electra
]


def parse_args():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument("--electra_model", default=None, type=str, required=True,
                        help="Model selected in the list: " + ", ".join(TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST))
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="Path to dataset.")
    parser.add_argument("--output_dir", default=".", type=str, required=True,
                        help="The output directory where the model checkpoints and predictions will be written.")
    parser.add_argument("--init_checkpoint", default=None, type=str,
                        help="The checkpoint file from pretraining")

    # Other parameters
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_predict", action='store_true', help="Whether to run eval on the dev set.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to evaluate the accuracy of predictions")
    parser.add_argument("--train_file", default=None, type=str,
                        help="SQuAD json for training. E.g., train-v1.1.json")
    parser.add_argument("--predict_file", default=None, type=str,
                        help="SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
    parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
    parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.")
    parser.add_argument("--learning_rate", default=1e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay_rate", default=0.01, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--layerwise_lr_decay", default=0.8, type=float,
                        help="The layerwise learning rate decay. Shallower layers have lower learning rates.")
    parser.add_argument("--num_train_epochs", default=3, type=int,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1.0, type=float,
                        help="Total number of training steps to perform.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% "
                             "of training.")
    parser.add_argument("--max_seq_length", default=384, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--doc_stride", default=128, type=int,
                        help="When splitting up a long document into chunks, how much stride to take between chunks.")
    parser.add_argument("--max_query_length", default=64, type=int,
                        help="The maximum number of tokens for the question. Questions longer than this will "
                             "be truncated to this length.")
    parser.add_argument("--vocab_file", default=None, type=str,
                        help="Path to vocabulary file used for tokenization")
    parser.add_argument("--ci", action="store_true", help="true if running on CI")
    parser.add_argument("--joint_head", default=True, type=bool,
                        help="Jointly predict the start and end positions")
    parser.add_argument("--beam_size", default=4, type=int,
                        help="Beam size when doing joint predictions")
    parser.add_argument("--n_best_size", default=20, type=int,
                        help="The total number of n-best predictions to generate in the nbest_predictions.json "
                             "output file.")
    parser.add_argument("--max_answer_length", default=30, type=int,
                        help="The maximum length of an answer that can be generated. This is needed because the start "
                             "and end predictions are not conditioned on one another.")
    parser.add_argument("--verbose_logging", action='store_true',
                        help="If true, all of the warnings related to data processing will be printed. "
                             "A number of warnings are expected for a normal SQuAD evaluation.")
    parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available")
    parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
    parser.add_argument("--evaluate_during_training", action="store_true",
                        help="Run evaluation during training at each logging step.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', -1),
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--amp', action='store_true', help="Automatic mixed precision training")
    parser.add_argument('--fp16_all_reduce', action='store_true', help="Whether to use 16-bit all reduce")
    parser.add_argument('--xla', action='store_true', help="Whether to use XLA")
    parser.add_argument('--version_2_with_negative', action='store_true',
                        help='If true, the SQuAD examples contain some that do not have an answer.')
    parser.add_argument('--null_score_diff_threshold', type=float, default=0.0,
                        help="If null_score - best_non_null is greater than the threshold predict null.")
    parser.add_argument('--log_freq', type=int, default=50, help='frequency of logging loss.')
    parser.add_argument('--json-summary', type=str, default="results/dllogger.json",
                        help='If provided, the json summary will be written to the specified file.')
    parser.add_argument("--eval_script", help="Script to evaluate squad predictions", default="evaluate.py", type=str)
    parser.add_argument("--use_env", action='store_true', help="Whether to read local rank from ENVVAR")
    parser.add_argument('--skip_checkpoint', default=False, action='store_true', help="Whether to save checkpoints")
    parser.add_argument('--disable-progress-bar', default=False, action='store_true',
                        help='Disable tqdm progress bar')
    parser.add_argument("--skip_cache", default=False, action='store_true',
                        help="Whether to cache train features")
    parser.add_argument("--cache_dir", default=None, type=str,
                        help="Location to cache train features. Will default to the dataset directory")

    args = parser.parse_args()

    if not args.do_train and (not args.init_checkpoint or args.init_checkpoint == 'None'):
        raise ValueError("Checkpoint is required if do_train is not set")

    return args


def get_dataset_from_features(features, batch_size, drop_remainder=True, ngpu=8, mode="train", v2=False):
    """Input function for training"""
    all_input_ids = tf.convert_to_tensor([f.input_ids for f in features], dtype=tf.int64)
    all_input_mask = tf.convert_to_tensor([f.attention_mask for f in features], dtype=tf.int64)
    all_segment_ids = tf.convert_to_tensor([f.token_type_ids for f in features], dtype=tf.int64)
    all_start_pos = tf.convert_to_tensor([f.start_position for f in features], dtype=tf.int64)
    all_end_pos = tf.convert_to_tensor([f.end_position for f in features], dtype=tf.int64)

    # if v2 else None:
    all_cls_index = tf.convert_to_tensor([f.cls_index for f in features], dtype=tf.int64)
    all_p_mask = tf.convert_to_tensor([f.p_mask for f in features], dtype=tf.float32)
    all_is_impossible = tf.convert_to_tensor([f.is_impossible for f in features], dtype=tf.float32)

    dataset = tf.data.Dataset.from_tensor_slices(
        (all_input_ids, all_input_mask, all_segment_ids, all_start_pos, all_end_pos)
        + (all_cls_index, all_p_mask, all_is_impossible))

    if ngpu > 1:
        dataset = dataset.shard(get_world_size(), get_rank())
    if mode == "train":
        dataset = dataset.shuffle(batch_size * 3)
        # dataset = dataset.map(self._preproc_samples,
        #                       num_parallel_calls=multiprocessing.cpu_count()//self._num_gpus)
    dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)
    dataset = dataset.prefetch(batch_size)
    return dataset


@tf.function
def train_step(model, inputs, loss, amp, opt, init, v2=False, loss_class=None, fp16=False, clip_norm=1.0):
    with tf.GradientTape() as tape:
        [input_ids, input_mask, segment_ids, start_positions, end_positions, cls_index, p_mask,
is_impossible] = inputs if not v2: is_impossible = None start_logits, end_logits, cls_logits = model(input_ids, attention_mask=input_mask, token_type_ids=segment_ids, start_positions=start_positions, end_positions=end_positions, cls_index=cls_index, p_mask=p_mask, is_impossible=is_impossible, position_ids=None, head_mask=None, inputs_embeds=None, training=True, )[0:3] # If we are on multi-GPU, split add a dimension if len(start_positions.shape) > 1: start_positions = tf.squeeze(start_positions, axis=-1, name="squeeze_start_positions") if len(end_positions.shape) > 1: end_positions = tf.squeeze(end_positions, axis=-1, name="squeeze_end_positions") if is_impossible is not None and len(is_impossible.shape) > 1 and v2 and cls_logits is not None: is_impossible = tf.squeeze(is_impossible, axis=-1, name="squeeze_is_impossible") # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.shape[1] start_positions = tf.clip_by_value(start_positions, 0, ignored_index, name="clip_start_positions") end_positions = tf.clip_by_value(end_positions, 0, ignored_index, name="clip_end_positions") start_loss = loss(y_true=start_positions, y_pred=tf.cast(start_logits, tf.float32)) end_loss = loss(y_true=end_positions, y_pred=tf.cast(end_logits, tf.float32)) loss_value = (start_loss + end_loss) / 2 if v2: cls_loss_value = loss_class(y_true=is_impossible, y_pred=tf.cast(cls_logits, tf.float32)) loss_value += cls_loss_value * 0.5 unscaled_loss = tf.stop_gradient(loss_value) if amp: loss_value = opt.get_scaled_loss(loss_value) tape = hvd.DistributedGradientTape(tape, sparse_as_dense=True, compression=Compression.fp16 if fp16 else Compression.none) gradients = tape.gradient(loss_value, model.trainable_variables) if amp: gradients = opt.get_unscaled_gradients(gradients) (gradients, _) = tf.clip_by_global_norm(gradients, clip_norm=clip_norm) opt.apply_gradients(zip(gradients, model.trainable_variables)) # , clip_norm=1.0) if init: hvd.broadcast_variables(model.variables, root_rank=0) hvd.broadcast_variables(opt.variables(), root_rank=0) return unscaled_loss # , outputs#, tape.gradient(loss_value, model.trainable_variables) @tf.function def infer_step(model, input_ids, attention_mask=None, token_type_ids=None, cls_index=None, p_mask=None, position_ids=None, head_mask=None, inputs_embeds=None, training=False, ): return model(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, cls_index=cls_index, p_mask=p_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, training=training, ) def main(): args = parse_args() hvd.init() set_affinity(hvd.local_rank()) if is_main_process(): log("Running total processes: {}".format(get_world_size())) log("Starting process: {}".format(get_rank())) if is_main_process(): dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=args.json_summary), dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)]) else: dllogger.init(backends=[]) dllogger.metadata("exact_match", {"unit": None}) dllogger.metadata("F1", {"unit": None}) dllogger.metadata("inference_sequences_per_second", {"unit": "sequences/s"}) dllogger.metadata("training_sequences_per_second", {"unit": "sequences/s"}) tf.random.set_seed(args.seed) dllogger.log(step="PARAMETER", data={"SEED": args.seed}) # script parameters BATCH_SIZE = args.train_batch_size EVAL_BATCH_SIZE = args.predict_batch_size USE_XLA = args.xla USE_AMP = args.amp EPOCHS = 
args.num_train_epochs if not args.do_train: EPOCHS = args.num_train_epochs = 1 log("Since running inference only, setting args.num_train_epochs to 1") if not os.path.exists(args.output_dir) and is_main_process(): os.makedirs(args.output_dir) # TensorFlow configuration gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) tf.config.experimental.set_visible_devices(gpus[hvd.local_rank()], 'GPU') tf.config.optimizer.set_jit(USE_XLA) #tf.config.optimizer.set_experimental_options({"auto_mixed_precision": USE_AMP}) if args.amp: policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16", loss_scale="dynamic") tf.keras.mixed_precision.experimental.set_policy(policy) print('Compute dtype: %s' % policy.compute_dtype) # Compute dtype: float16 print('Variable dtype: %s' % policy.variable_dtype) # Variable dtype: float32 if is_main_process(): log("***** Loading tokenizer and model *****") # Load tokenizer and model from pretrained model/vocabulary. Specify the number of labels to classify (2+: classification, 1: regression) electra_model = args.electra_model config = ElectraConfig.from_pretrained(electra_model, cache_dir=args.cache_dir) config.update({"amp": args.amp}) if args.vocab_file is None: tokenizer = ElectraTokenizer.from_pretrained(electra_model, cache_dir=args.cache_dir) else: tokenizer = ElectraTokenizer( vocab_file=args.vocab_file, do_lower_case=args.do_lower_case) model = TFElectraForQuestionAnswering.from_pretrained(electra_model, config=config, cache_dir=args.cache_dir, args=args) if is_main_process(): log("***** Loading dataset *****") # Load data processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() train_examples = processor.get_train_examples(args.data_dir) if args.do_train else None dev_examples = processor.get_dev_examples(args.data_dir) if args.do_predict else None if is_main_process(): log("***** Loading features *****") # Load cached features squad_version = '2.0' if args.version_2_with_negative else '1.1' if args.cache_dir is None: args.cache_dir = args.data_dir cached_train_features_file = args.cache_dir.rstrip('/') + '/' + 'TF2_train-v{4}.json_{1}_{2}_{3}'.format( electra_model.split("/")[1], str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length), squad_version) cached_dev_features_file = args.cache_dir.rstrip('/') + '/' + 'TF2_dev-v{4}.json_{1}_{2}_{3}'.format( electra_model.split("/")[1], str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length), squad_version) try: with open(cached_train_features_file, "rb") as reader: train_features = pickle.load(reader) if args.do_train else [] with open(cached_dev_features_file, "rb") as reader: dev_features = pickle.load(reader) if args.do_predict else [] except: train_features = ( # TODO: (yy) do on rank 0? 
squad_convert_examples_to_features( examples=train_examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=True, return_dataset="", ) if args.do_train else [] ) dev_features = ( squad_convert_examples_to_features( examples=dev_examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=False, return_dataset="", ) if args.do_predict else [] ) # Dump Cached features if not args.skip_cache and is_main_process(): if args.do_train: log("***** Building Cache Files: {} *****".format(cached_train_features_file)) with open(cached_train_features_file, "wb") as writer: pickle.dump(train_features, writer) if args.do_predict: log("***** Building Cache Files: {} *****".format(cached_dev_features_file)) with open(cached_dev_features_file, "wb") as writer: pickle.dump(dev_features, writer) len_train_features = len(train_features) total_train_steps = int((len_train_features * EPOCHS / BATCH_SIZE) / get_world_size()) + 1 train_steps_per_epoch = int((len_train_features / BATCH_SIZE) / get_world_size()) + 1 len_dev_features = len(dev_features) total_dev_steps = int((len_dev_features / EVAL_BATCH_SIZE)) + 1 train_dataset = get_dataset_from_features(train_features, BATCH_SIZE, v2=args.version_2_with_negative) if args.do_train else [] dev_dataset = get_dataset_from_features(dev_features, EVAL_BATCH_SIZE, drop_remainder=False, ngpu=1, mode="dev", v2=args.version_2_with_negative) if args.do_predict else [] opt = create_optimizer(init_lr=args.learning_rate, num_train_steps=total_train_steps, num_warmup_steps=int(args.warmup_proportion * total_train_steps), weight_decay_rate=args.weight_decay_rate, layerwise_lr_decay=args.layerwise_lr_decay, n_transformer_layers=model.num_hidden_layers) if USE_AMP: # loss scaling is currently required when using mixed precision opt = tf.keras.mixed_precision.experimental.LossScaleOptimizer(opt, "dynamic") # Define loss function loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_class = tf.keras.losses.BinaryCrossentropy( from_logits=True, name='binary_crossentropy' ) metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy") model.compile(optimizer=opt, loss=loss, metrics=[metric]) train_loss_results = [] if args.do_train and is_main_process(): log("***** Running training *****") log(" Num examples = ", len_train_features) log(" Num Epochs = ", args.num_train_epochs) log(" Instantaneous batch size per GPU = ", args.train_batch_size) log( " Total train batch size (w. 
parallel, distributed & accumulation) = ", args.train_batch_size * get_world_size(), ) log(" Total optimization steps =", total_train_steps) total_train_time = 0 latency = [] for epoch in range(EPOCHS): if args.do_train: epoch_loss_avg = tf.keras.metrics.Mean() epoch_perf_avg = tf.keras.metrics.Mean() epoch_start = time.time() epoch_iterator = tqdm(train_dataset, total=train_steps_per_epoch, desc="Iteration", mininterval=5, disable=not is_main_process()) for iter, inputs in enumerate(epoch_iterator): # breaking criterion if max_steps if > 1 if args.max_steps > 0 and (epoch * train_steps_per_epoch + iter) > args.max_steps: break iter_start = time.time() # Optimize the model loss_value = train_step(model, inputs, loss, USE_AMP, opt, (iter == 0 and epoch == 0), v2=args.version_2_with_negative, loss_class=loss_class, fp16=USE_AMP) #introduce CPU-GPU sync for training perf computation loss_numpy = loss_value.numpy() epoch_perf_avg.update_state(1. * BATCH_SIZE / (time.time() - iter_start)) if iter % args.log_freq == 0: if is_main_process(): log("\nEpoch: {:03d}, Step:{:6d}, Loss:{:12.8f}, Perf:{:5.0f}, loss_scale:{}, opt_step:{}".format(epoch, iter, loss_value, epoch_perf_avg.result() * get_world_size(), opt.loss_scale if config.amp else 1, int(opt.iterations))) dllogger.log(step=(epoch, iter,), data={"step_loss": float(loss_value.numpy()), "train_perf": float( epoch_perf_avg.result().numpy() * get_world_size())}) # Track progress epoch_loss_avg.update_state(loss_value) # Add current batch loss # End epoch train_loss_results.append(epoch_loss_avg.result()) total_train_time += float(time.time() - epoch_start) # Summarize and save checkpoint at the end of each epoch if is_main_process(): dllogger.log(step=tuple(), data={"e2e_train_time": total_train_time, "training_sequences_per_second": float( epoch_perf_avg.result().numpy() * get_world_size()), "final_loss": float(epoch_loss_avg.result().numpy())}) if not args.skip_checkpoint: if args.ci: checkpoint_name = "{}/electra_base_qa_v2_{}_epoch_{}_ckpt".format(args.output_dir, args.version_2_with_negative, epoch + 1) else: checkpoint_name = "checkpoints/electra_base_qa_v2_{}_epoch_{}_ckpt".format(args.version_2_with_negative, epoch + 1) if is_main_process(): model.save_weights(checkpoint_name) if args.do_predict and (args.evaluate_during_training or epoch == args.num_train_epochs - 1): if not args.do_train: log("***** Loading checkpoint: {} *****".format(args.init_checkpoint)) model.load_weights(args.init_checkpoint).expect_partial() current_feature_id = 0 all_results = [] if is_main_process(): log("***** Running evaluation *****") log(" Num Batches = ", total_dev_steps) log(" Batch size = ", args.predict_batch_size) raw_infer_start = time.time() if is_main_process(): infer_perf_avg = tf.keras.metrics.Mean() dev_iterator = tqdm(dev_dataset, total=total_dev_steps, desc="Iteration", mininterval=5, disable=not is_main_process()) for input_ids, input_mask, segment_ids, start_positions, end_positions, cls_index, p_mask, is_impossible in dev_iterator: # training=False is needed only if there are layers with different # behavior during training versus inference (e.g. Dropout). 
iter_start = time.time() if not args.joint_head: batch_start_logits, batch_end_logits = infer_step(model, input_ids, attention_mask=input_mask, token_type_ids=segment_ids, )[:2] #Synchronize with GPU to compute time _ = batch_start_logits.numpy() else: outputs = infer_step(model, input_ids, attention_mask=input_mask, token_type_ids=segment_ids, cls_index=cls_index, p_mask=p_mask, ) #Synchronize with GPU to compute time _ = outputs[0].numpy() infer_time = (time.time() - iter_start) infer_perf_avg.update_state(1. * EVAL_BATCH_SIZE / infer_time) latency.append(infer_time) for iter_ in range(input_ids.shape[0]): if not args.joint_head: start_logits = batch_start_logits[iter_].numpy().tolist() end_logits = batch_end_logits[iter_].numpy().tolist() dev_feature = dev_features[current_feature_id] current_feature_id += 1 unique_id = int(dev_feature.unique_id) all_results.append(RawResult(unique_id=unique_id, start_logits=start_logits, end_logits=end_logits)) else: dev_feature = dev_features[current_feature_id] current_feature_id += 1 unique_id = int(dev_feature.unique_id) output = [output[iter_].numpy().tolist() for output in outputs] start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) all_results.append(result) # Compute and save predictions answers, nbest_answers = get_answers(dev_examples, dev_features, all_results, args) output_prediction_file = os.path.join(args.output_dir, "predictions.json") output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json") e2e_infer_time = time.time() - raw_infer_start # if args.version_2_with_negative: # output_null_log_odds_file = os.path.join(args.output_dir, "null_odds.json") # else: # output_null_log_odds_file = None with open(output_prediction_file, "w") as f: f.write(json.dumps(answers, indent=4) + "\n") with open(output_nbest_file, "w") as f: f.write(json.dumps(nbest_answers, indent=4) + "\n") if args.do_eval: if args.version_2_with_negative: dev_file = "dev-v2.0.json" else: dev_file = "dev-v1.1.json" eval_out = subprocess.check_output([sys.executable, args.eval_script, args.data_dir + "/" + dev_file, output_prediction_file]) log(eval_out.decode('UTF-8')) scores = str(eval_out).strip() exact_match = float(scores.split(":")[1].split(",")[0]) if args.version_2_with_negative: f1 = float(scores.split(":")[2].split(",")[0]) else: f1 = float(scores.split(":")[2].split("}")[0]) log("Epoch: {:03d} Results: {}".format(epoch, eval_out.decode('UTF-8'))) log("**EVAL SUMMARY** - Epoch: {:03d}, EM: {:6.3f}, F1: {:6.3f}, Infer_Perf: {:4.0f} seq/s" .format(epoch, exact_match, f1, infer_perf_avg.result())) latency_all = sorted(latency)[:-2] log( "**LATENCY SUMMARY** - Epoch: {:03d}, Ave: {:6.3f} ms, 90%: {:6.3f} ms, 95%: {:6.3f} ms, 99%: {:6.3f} ms" .format(epoch, sum(latency_all) / len(latency_all) * 1000, sum(latency_all[:int(len(latency_all) * 0.9)]) / int(len(latency_all) * 0.9) * 1000, sum(latency_all[:int(len(latency_all) * 0.95)]) / int(len(latency_all) * 0.95) * 1000, sum(latency_all[:int(len(latency_all) * 0.99)]) / int(len(latency_all) * 0.99) * 1000, )) dllogger.log(step=tuple(), data={"inference_sequences_per_second": float(infer_perf_avg.result().numpy()), "e2e_inference_time": e2e_infer_time}) if is_main_process() and args.do_train and args.do_eval: log( "**RESULTS SUMMARY** - EM: {:6.3f}, F1: {:6.3f}, Train_Time: {:4.0f} s, 
Train_Perf: {:4.0f} seq/s, Infer_Perf: {:4.0f} seq/s" .format(exact_match, f1, total_train_time, epoch_perf_avg.result() * get_world_size(), infer_perf_avg.result())) dllogger.log(step=tuple(), data={"exact_match": exact_match, "F1": f1}) if __name__ == "__main__": main()
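The `--layerwise_lr_decay` flag above (default 0.8) scales the learning rate so that shallower transformer layers train with smaller steps. The snippet below is a minimal, hedged sketch of that idea, not the repository's `create_optimizer` implementation; the helper name `layer_lr_multipliers` and the exact exponent convention are assumptions for illustration.

```python
# Hedged sketch of layerwise learning-rate decay: with decay d and n
# transformer layers, layer k (0 = closest to the input) is trained with
# base_lr * d ** (n - k); the embeddings get the smallest rate and the
# task head keeps the full base learning rate. This mirrors the idea behind
# --layerwise_lr_decay but is not a copy of the repository's optimizer code.
def layer_lr_multipliers(n_layers: int, decay: float = 0.8) -> dict:
    multipliers = {"embeddings": decay ** (n_layers + 1)}
    for k in range(n_layers):
        multipliers[f"layer_{k}"] = decay ** (n_layers - k)
    multipliers["task_head"] = 1.0  # e.g. the SQuAD output layer
    return multipliers


if __name__ == "__main__":
    base_lr = 1e-4  # matches the script's --learning_rate default
    for name, mult in layer_lr_multipliers(12).items():
        print(f"{name:12s} lr = {base_lr * mult:.2e}")
```

Under this convention, with 12 layers and the script's default learning rate of 1e-4, the top layer trains at about 8e-5 while the embeddings fall to roughly 5.5e-6.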
TensorFlow/LanguageModeling/BERT
BERT
multilingual
## Models There are two multilingual models currently available. We do not plan to release more single-language models, but we may release `BERT-Large` versions of these two in the future: * **[`BERT-Base, Multilingual Cased (New, recommended)`](https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip)**: 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters * **[`BERT-Base, Multilingual Uncased (Orig, not recommended)`](https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip)**: 102 languages, 12-layer, 768-hidden, 12-heads, 110M parameters * **[`BERT-Base, Chinese`](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)**: Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M parameters **The `Multilingual Cased (New)` model also fixes normalization issues in many languages, so it is recommended in languages with non-Latin alphabets (and is often better for most languages with Latin alphabets). When using this model, make sure to pass `--do_lower_case=false` to `run_pretraining.py` and other scripts.** See the [list of languages](#list-of-languages) that the Multilingual model supports. The Multilingual model does include Chinese (and English), but if your fine-tuning data is Chinese-only, then the Chinese model will likely produce better results. ## Results To evaluate these systems, we use the [XNLI dataset](https://github.com/facebookresearch/XNLI), which is a version of [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) where the dev and test sets have been translated (by humans) into 15 languages. Note that the training set was *machine* translated (we used the translations provided by XNLI, not Google NMT). For clarity, we only report on 6 languages below: <!-- mdformat off(no table) --> | System | English | Chinese | Spanish | German | Arabic | Urdu | | --------------------------------- | -------- | -------- | -------- | -------- | -------- | -------- | | XNLI Baseline - Translate Train | 73.7 | 67.0 | 68.8 | 66.5 | 65.8 | 56.6 | | XNLI Baseline - Translate Test | 73.7 | 68.3 | 70.7 | 68.7 | 66.8 | 59.3 | | BERT - Translate Train Cased | **81.9** | **76.6** | **77.8** | **75.9** | **70.7** | 61.6 | | BERT - Translate Train Uncased | 81.4 | 74.2 | 77.3 | 75.2 | 70.5 | 61.7 | | BERT - Translate Test Uncased | 81.4 | 70.1 | 74.9 | 74.4 | 70.4 | **62.1** | | BERT - Zero Shot Uncased | 81.4 | 63.8 | 74.3 | 70.5 | 62.1 | 58.3 | <!-- mdformat on --> The first two rows are baselines from the XNLI paper and the last four rows are our results with BERT. **Translate Train** means that the MultiNLI training set was machine translated from English into the foreign language. So training and evaluation were both done in the foreign language. Unfortunately, training was done on machine-translated data, so it is impossible to quantify how much of the lower accuracy (compared to English) is due to the quality of the machine translation vs. the quality of the pre-trained model. **Translate Test** means that the XNLI test set was machine translated from the foreign language into English. So training and evaluation were both done on English. However, test evaluation was done on machine-translated English, so the accuracy depends on the quality of the machine translation system. **Zero Shot** means that the Multilingual BERT system was fine-tuned on English MultiNLI, and then evaluated on the foreign language XNLI test. 
In this case, machine translation was not involved at all in either the pre-training or fine-tuning. Note that the English result is worse than the 84.2 MultiNLI baseline because this training used Multilingual BERT rather than English-only BERT. This implies that for high-resource languages, the Multilingual model is somewhat worse than a single-language model. However, it is not feasible for us to train and maintain dozens of single-language models. Therefore, if your goal is to maximize performance with a language other than English or Chinese, you might find it beneficial to run pre-training for additional steps starting from our Multilingual model on data from your language of interest. Here is a comparison of training Chinese models with the Multilingual `BERT-Base` and Chinese-only `BERT-Base`: System | Chinese ----------------------- | ------- XNLI Baseline | 67.0 BERT Multilingual Model | 74.2 BERT Chinese-only Model | 77.2 Similar to English, the single-language model does 3% better than the Multilingual model. ## Fine-tuning Example The multilingual model does **not** require any special consideration or API changes. We did update the implementation of `BasicTokenizer` in `tokenization.py` to support Chinese character tokenization, so please update if you forked it. However, we did not change the tokenization API. To test the new models, we did modify `run_classifier.py` to add support for the [XNLI dataset](https://github.com/facebookresearch/XNLI). This is a 15-language version of MultiNLI where the dev/test sets have been human-translated, and the training set has been machine-translated. To run the fine-tuning code, please download the [XNLI dev/test set](https://s3.amazonaws.com/xnli/XNLI-1.0.zip) and the [XNLI machine-translated training set](https://s3.amazonaws.com/xnli/XNLI-MT-1.0.zip) and then unpack both .zip files into some directory `$XNLI_DIR`. To run fine-tuning on XNLI, note that the language is hard-coded into `run_classifier.py` (Chinese by default), so please modify `XnliProcessor` if you want to run on another language. This is a large dataset, so training will take a few hours on a GPU (or about 30 minutes on a Cloud TPU). To run an experiment quickly for debugging, just set `num_train_epochs` to a small value like `0.1`. ```shell export BERT_BASE_DIR=/path/to/bert/chinese_L-12_H-768_A-12 # or multilingual_L-12_H-768_A-12 export XNLI_DIR=/path/to/xnli python run_classifier.py \ --task_name=XNLI \ --do_train=true \ --do_eval=true \ --data_dir=$XNLI_DIR \ --vocab_file=$BERT_BASE_DIR/vocab.txt \ --bert_config_file=$BERT_BASE_DIR/bert_config.json \ --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \ --max_seq_length=128 \ --train_batch_size=32 \ --learning_rate=5e-5 \ --num_train_epochs=2.0 \ --output_dir=/tmp/xnli_output/ ``` With the Chinese-only model, the results should look something like this: ``` ***** Eval results ***** eval_accuracy = 0.774116 eval_loss = 0.83554 global_step = 24543 loss = 0.74603 ``` ## Details ### Data Source and Sampling The languages chosen were the [top 100 languages with the largest Wikipedias](https://meta.wikimedia.org/wiki/List_of_Wikipedias). 
The entire Wikipedia dump for each language (excluding user and talk pages) was taken as the training data. However, the size of the Wikipedia for a given language varies greatly, and therefore low-resource languages may be "under-represented" in terms of the neural network model (under the assumption that languages are "competing" for limited model capacity to some extent). However, the size of a Wikipedia also correlates with the number of speakers of a language, and we also don't want to overfit the model by performing thousands of epochs over a tiny Wikipedia for a particular language. To balance these two factors, we performed exponentially smoothed weighting of the data during pre-training data creation (and WordPiece vocab creation). In other words, let's say that the probability of a language is *P(L)*, e.g., *P(English) = 0.21* means that after concatenating all of the Wikipedias together, 21% of our data is English. We exponentiate each probability by some factor *S* and then re-normalize, and sample from that distribution. In our case we use *S=0.7* (a small illustrative sketch of this smoothing follows the language list below). So, high-resource languages like English will be under-sampled, and low-resource languages like Icelandic will be over-sampled. E.g., in the original distribution English would be sampled 1000x more than Icelandic, but after smoothing it's only sampled 100x more. ### Tokenization For tokenization, we use a 110k shared WordPiece vocabulary. The word counts are weighted the same way as the data, so low-resource languages are upweighted by some factor. We intentionally do *not* use any marker to denote the input language (so that zero-shot training can work). Because Chinese (and Japanese Kanji and Korean Hanja) does not have whitespace characters, we add spaces around every character in the [CJK Unicode range](https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_\(Unicode_block\)) before applying WordPiece. This means that Chinese is effectively character-tokenized. Note that the CJK Unicode block only includes Chinese-origin characters and does *not* include Hangul Korean or Katakana/Hiragana Japanese, which are tokenized with whitespace+WordPiece like all other languages. For all other languages, we apply the [same recipe as English](https://github.com/google-research/bert#tokenization): (a) lower casing+accent removal, (b) punctuation splitting, (c) whitespace tokenization. We understand that accent markers have substantial meaning in some languages, but felt that the benefits of reducing the effective vocabulary make up for this. Generally the strong contextual models of BERT should make up for any ambiguity introduced by stripping accent markers. ### List of Languages The multilingual model supports the following languages. 
These languages were chosen because they are the top 100 languages with the largest Wikipedias: * Afrikaans * Albanian * Arabic * Aragonese * Armenian * Asturian * Azerbaijani * Bashkir * Basque * Bavarian * Belarusian * Bengali * Bishnupriya Manipuri * Bosnian * Breton * Bulgarian * Burmese * Catalan * Cebuano * Chechen * Chinese (Simplified) * Chinese (Traditional) * Chuvash * Croatian * Czech * Danish * Dutch * English * Estonian * Finnish * French * Galician * Georgian * German * Greek * Gujarati * Haitian * Hebrew * Hindi * Hungarian * Icelandic * Ido * Indonesian * Irish * Italian * Japanese * Javanese * Kannada * Kazakh * Kirghiz * Korean * Latin * Latvian * Lithuanian * Lombard * Low Saxon * Luxembourgish * Macedonian * Malagasy * Malay * Malayalam * Marathi * Minangkabau * Nepali * Newar * Norwegian (Bokmal) * Norwegian (Nynorsk) * Occitan * Persian (Farsi) * Piedmontese * Polish * Portuguese * Punjabi * Romanian * Russian * Scots * Serbian * Serbo-Croatian * Sicilian * Slovak * Slovenian * South Azerbaijani * Spanish * Sundanese * Swahili * Swedish * Tagalog * Tajik * Tamil * Tatar * Telugu * Turkish * Ukrainian * Urdu * Uzbek * Vietnamese * Volapük * Waray-Waray * Welsh * West Frisian * Western Punjabi * Yoruba The **Multilingual Cased (New)** release additionally contains **Thai** and **Mongolian**, which were not included in the original release.
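As referenced above in the Data Source and Sampling section, the exponential smoothing can be written in a few lines. The sketch below is illustrative only: apart from *P(English) = 0.21* and *S = 0.7*, the probabilities are made-up placeholders rather than measured corpus statistics.

```python
# Hedged sketch of the exponent smoothing described above: p -> p**S,
# then re-normalize. With S = 0.7, high-resource languages are under-sampled
# and low-resource languages are over-sampled.
def smooth_probs(probs: dict, s: float = 0.7) -> dict:
    powered = {lang: p ** s for lang, p in probs.items()}
    total = sum(powered.values())
    return {lang: p / total for lang, p in powered.items()}


if __name__ == "__main__":
    # Only P(English)=0.21 comes from the text; the rest are placeholders.
    raw = {"english": 0.21, "icelandic": 0.00021, "other": 0.78979}
    smoothed = smooth_probs(raw)
    # The English/Icelandic sampling ratio drops from 1000x to
    # (0.21 / 0.00021) ** 0.7 ~= 126x, the same order of magnitude as the
    # "100x" figure quoted above.
    print(raw["english"] / raw["icelandic"],
          smoothed["english"] / smoothed["icelandic"])
```

Because the re-normalization divides every language by the same total, smoothing changes sampling ratios multiplicatively: any pairwise ratio r becomes r**S.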
TensorFlow/Detection/SSD/models/research/object_detection/predictors
predictors
convolutional_box_predictor_test
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for object_detection.predictors.convolutional_box_predictor.""" import numpy as np import tensorflow as tf from google.protobuf import text_format from object_detection.builders import box_predictor_builder from object_detection.builders import hyperparams_builder from object_detection.predictors import convolutional_box_predictor as box_predictor from object_detection.protos import hyperparams_pb2 from object_detection.utils import test_case class ConvolutionalBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { truncated_normal_initializer { } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def test_get_boxes_for_five_aspect_ratios_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) def test_get_boxes_for_one_aspect_ratio_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[1], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 
64, 1, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 64, 1]) def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( self): num_classes_without_background = 6 image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) (box_encodings, class_predictions_with_background) = self.execute(graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 1, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 320, num_classes_without_background+1]) def test_get_predictions_with_feature_maps_of_dynamic_shape( self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, use_dropout=True, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) init_op = tf.global_variables_initializer() resolution = 32 expected_num_anchors = resolution*resolution*5 with self.test_session() as sess: sess.run(init_op) (box_encodings_shape, objectness_predictions_shape) = sess.run( [tf.shape(box_encodings), tf.shape(objectness_predictions)], feed_dict={image_features: np.random.rand(4, resolution, resolution, 64)}) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) self.assertAllEqual(objectness_predictions_shape, [4, expected_num_anchors, 1]) expected_variable_set = set([ 'BoxPredictor/Conv2d_0_1x1_32/biases', 'BoxPredictor/Conv2d_0_1x1_32/weights', 'BoxPredictor/BoxEncodingPredictor/biases', 'BoxPredictor/BoxEncodingPredictor/weights', 'BoxPredictor/ClassPredictor/biases', 'BoxPredictor/ClassPredictor/weights']) self.assertEqual(expected_variable_set, actual_variable_set) def test_use_depthwise_convolution(self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4, use_dropout=True, use_depthwise=True)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = 
tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) init_op = tf.global_variables_initializer() resolution = 32 expected_num_anchors = resolution*resolution*5 with self.test_session() as sess: sess.run(init_op) (box_encodings_shape, objectness_predictions_shape) = sess.run( [tf.shape(box_encodings), tf.shape(objectness_predictions)], feed_dict={image_features: np.random.rand(4, resolution, resolution, 64)}) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 1, 4]) self.assertAllEqual(objectness_predictions_shape, [4, expected_num_anchors, 1]) expected_variable_set = set([ 'BoxPredictor/Conv2d_0_1x1_32/biases', 'BoxPredictor/Conv2d_0_1x1_32/weights', 'BoxPredictor/BoxEncodingPredictor_depthwise/biases', 'BoxPredictor/BoxEncodingPredictor_depthwise/depthwise_weights', 'BoxPredictor/BoxEncodingPredictor/biases', 'BoxPredictor/BoxEncodingPredictor/weights', 'BoxPredictor/ClassPredictor_depthwise/biases', 'BoxPredictor/ClassPredictor_depthwise/depthwise_weights', 'BoxPredictor/ClassPredictor/biases', 'BoxPredictor/ClassPredictor/weights']) self.assertEqual(expected_variable_set, actual_variable_set) def test_no_dangling_outputs(self): image_features = tf.placeholder(dtype=tf.float32, shape=[4, None, None, 64]) conv_box_predictor = ( box_predictor_builder.build_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), min_depth=0, max_depth=32, num_layers_before_predictor=1, dropout_keep_prob=0.8, kernel_size=1, box_code_size=4, use_dropout=True, use_depthwise=True)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) bad_dangling_ops = [] types_safe_to_dangle = set(['Assign', 'Mul', 'Const']) for op in tf.get_default_graph().get_operations(): if (not op.outputs) or (not op.outputs[0].consumers()): if 'BoxPredictor' in op.name: if op.type not in types_safe_to_dangle: bad_dangling_ops.append(op) self.assertEqual(bad_dangling_ops, []) class WeightSharedConvolutionalBoxPredictorTest(test_case.TestCase): def _build_arg_scope_with_conv_hyperparams(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { random_normal_initializer { stddev: 0.01 mean: 0.0 } } batch_norm { train: true, } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def _build_conv_arg_scope_no_batch_norm(self): conv_hyperparams = hyperparams_pb2.Hyperparams() conv_hyperparams_text_proto = """ activation: RELU_6 regularizer { l2_regularizer { } } initializer { random_normal_initializer { stddev: 0.01 mean: 0.0 } } """ text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams) return hyperparams_builder.build(conv_hyperparams, is_training=True) def test_get_boxes_for_five_aspect_ratios_per_location(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=0, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, 
num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) objectness_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, objectness_predictions) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, objectness_predictions) = self.execute( graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 4]) self.assertAllEqual(objectness_predictions.shape, [4, 320, 1]) def test_bias_predictions_to_background_with_sigmoid_score_conversion(self): def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=True, num_classes=2, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, class_prediction_bias_init=-4.6, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') class_predictions = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (tf.nn.sigmoid(class_predictions),) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) class_predictions = self.execute(graph_fn, [image_features]) self.assertAlmostEqual(np.mean(class_predictions), 0.01, places=3) def test_get_multi_class_predictions_for_five_aspect_ratios_per_location( self): num_classes_without_background = 6 def graph_fn(image_features): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features], num_predictions_per_location=[5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat(box_predictions[ box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features]) self.assertAllEqual(box_encodings.shape, [4, 320, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 320, num_classes_without_background+1]) def test_get_multi_class_predictions_from_two_feature_maps( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) 
image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features1, image_features2]) self.assertAllEqual(box_encodings.shape, [4, 640, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 640, num_classes_without_background+1]) def test_get_multi_class_predictions_from_feature_maps_of_different_depth( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2, image_features3): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=1, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2, image_features3], num_predictions_per_location=[5, 5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) image_features1 = np.random.rand(4, 8, 8, 64).astype(np.float32) image_features2 = np.random.rand(4, 8, 8, 64).astype(np.float32) image_features3 = np.random.rand(4, 8, 8, 32).astype(np.float32) (box_encodings, class_predictions_with_background) = self.execute( graph_fn, [image_features1, image_features2, image_features3]) self.assertAllEqual(box_encodings.shape, [4, 960, 4]) self.assertAllEqual(class_predictions_with_background.shape, [4, 960, num_classes_without_background+1]) def test_predictions_multiple_feature_maps_share_weights_separate_batchnorm( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Box prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), # Box prediction head 
('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/BatchNorm/feature_1/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_0/beta'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/BatchNorm/feature_1/beta'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_multiple_feature_maps_share_weights_without_batchnorm( self): num_classes_without_background = 6 def graph_fn(image_features1, image_features2): conv_box_predictor = ( box_predictor_builder.build_weight_shared_convolutional_box_predictor( is_training=False, num_classes=num_classes_without_background, conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(), depth=32, num_layers_before_predictor=2, box_code_size=4, apply_batch_norm=False)) box_predictions = conv_box_predictor.predict( [image_features1, image_features2], num_predictions_per_location=[5, 5], scope='BoxPredictor') box_encodings = tf.concat( box_predictions[box_predictor.BOX_ENCODINGS], axis=1) class_predictions_with_background = tf.concat( box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1) return (box_encodings, class_predictions_with_background) with self.test_session(graph=tf.Graph()): graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32), tf.random_uniform([4, 16, 16, 3], dtype=tf.float32)) actual_variable_set = set( [var.op.name for var in tf.trainable_variables()]) expected_variable_set = set([ # Box prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictionTower/conv2d_1/biases'), # Box prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'BoxPredictor/biases'), # Class prediction tower ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_0/biases'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictionTower/conv2d_1/biases'), # Class prediction head ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/weights'), ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/' 'ClassPredictor/biases')]) self.assertEqual(expected_variable_set, actual_variable_set) def test_predictions_multiple_feature_maps_share_weights_with_depthwise( self): 
    num_classes_without_background = 6
    def graph_fn(image_features1, image_features2):
      conv_box_predictor = (
          box_predictor_builder.build_weight_shared_convolutional_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
              depth=32,
              num_layers_before_predictor=2,
              box_code_size=4,
              apply_batch_norm=False,
              use_depthwise=True))
      box_predictions = conv_box_predictor.predict(
          [image_features1, image_features2],
          num_predictions_per_location=[5, 5],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)

    with self.test_session(graph=tf.Graph()):
      graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32),
               tf.random_uniform([4, 16, 16, 3], dtype=tf.float32))
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    expected_variable_set = set([
        # Box prediction tower
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/depthwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/pointwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/biases'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/depthwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/pointwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/biases'),
        # Box prediction head
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictor/depthwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictor/pointwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictor/biases'),
        # Class prediction tower
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/depthwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/pointwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/biases'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/depthwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/pointwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/biases'),
        # Class prediction head
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictor/depthwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictor/pointwise_weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictor/biases')])
    self.assertEqual(expected_variable_set, actual_variable_set)

  def test_no_batchnorm_params_when_batchnorm_is_not_configured(self):
    num_classes_without_background = 6
    def graph_fn(image_features1, image_features2):
      conv_box_predictor = (
          box_predictor_builder.build_weight_shared_convolutional_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams_fn=self._build_conv_arg_scope_no_batch_norm(),
              depth=32,
              num_layers_before_predictor=2,
              box_code_size=4,
              apply_batch_norm=False))
      box_predictions = conv_box_predictor.predict(
          [image_features1, image_features2],
          num_predictions_per_location=[5, 5],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)

    with self.test_session(graph=tf.Graph()):
      graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32),
               tf.random_uniform([4, 16, 16, 3], dtype=tf.float32))
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    expected_variable_set = set([
        # Box prediction tower
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_0/biases'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictionTower/conv2d_1/biases'),
        # Box prediction head
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictor/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictor/biases'),
        # Class prediction tower
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_0/biases'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictionTower/conv2d_1/biases'),
        # Class prediction head
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictor/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictor/biases')])
    self.assertEqual(expected_variable_set, actual_variable_set)

  def test_predictions_share_weights_share_tower_separate_batchnorm(
      self):
    num_classes_without_background = 6
    def graph_fn(image_features1, image_features2):
      conv_box_predictor = (
          box_predictor_builder.build_weight_shared_convolutional_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
              depth=32,
              num_layers_before_predictor=2,
              box_code_size=4,
              share_prediction_tower=True))
      box_predictions = conv_box_predictor.predict(
          [image_features1, image_features2],
          num_predictions_per_location=[5, 5],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)

    with self.test_session(graph=tf.Graph()):
      graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32),
               tf.random_uniform([4, 16, 16, 3], dtype=tf.float32))
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    expected_variable_set = set([
        # Shared prediction tower
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_0/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_0/BatchNorm/feature_0/beta'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_0/BatchNorm/feature_1/beta'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_1/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_1/BatchNorm/feature_0/beta'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_1/BatchNorm/feature_1/beta'),
        # Box prediction head
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictor/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictor/biases'),
        # Class prediction head
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictor/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictor/biases')])
    self.assertEqual(expected_variable_set, actual_variable_set)

  def test_predictions_share_weights_share_tower_without_batchnorm(
      self):
    num_classes_without_background = 6
    def graph_fn(image_features1, image_features2):
      conv_box_predictor = (
          box_predictor_builder.build_weight_shared_convolutional_box_predictor(
              is_training=False,
              num_classes=num_classes_without_background,
              conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
              depth=32,
              num_layers_before_predictor=2,
              box_code_size=4,
              share_prediction_tower=True,
              apply_batch_norm=False))
      box_predictions = conv_box_predictor.predict(
          [image_features1, image_features2],
          num_predictions_per_location=[5, 5],
          scope='BoxPredictor')
      box_encodings = tf.concat(
          box_predictions[box_predictor.BOX_ENCODINGS], axis=1)
      class_predictions_with_background = tf.concat(
          box_predictions[box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND],
          axis=1)
      return (box_encodings, class_predictions_with_background)

    with self.test_session(graph=tf.Graph()):
      graph_fn(tf.random_uniform([4, 32, 32, 3], dtype=tf.float32),
               tf.random_uniform([4, 16, 16, 3], dtype=tf.float32))
      actual_variable_set = set(
          [var.op.name for var in tf.trainable_variables()])
    expected_variable_set = set([
        # Shared prediction tower
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_0/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_0/biases'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_1/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'PredictionTower/conv2d_1/biases'),
        # Box prediction head
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictor/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'BoxPredictor/biases'),
        # Class prediction head
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictor/weights'),
        ('BoxPredictor/WeightSharedConvolutionalBoxPredictor/'
         'ClassPredictor/biases')])
    self.assertEqual(expected_variable_set, actual_variable_set)

  def test_get_predictions_with_feature_maps_of_dynamic_shape(
      self):
    image_features = tf.placeholder(dtype=tf.float32,
                                    shape=[4, None, None, 64])
    conv_box_predictor = (
        box_predictor_builder.build_weight_shared_convolutional_box_predictor(
            is_training=False,
            num_classes=0,
            conv_hyperparams_fn=self._build_arg_scope_with_conv_hyperparams(),
            depth=32,
            num_layers_before_predictor=1,
            box_code_size=4))
    box_predictions = conv_box_predictor.predict(
        [image_features], num_predictions_per_location=[5],
        scope='BoxPredictor')
    box_encodings = tf.concat(box_predictions[box_predictor.BOX_ENCODINGS],
                              axis=1)
    objectness_predictions = tf.concat(box_predictions[
        box_predictor.CLASS_PREDICTIONS_WITH_BACKGROUND], axis=1)
    init_op = tf.global_variables_initializer()

    resolution = 32
    expected_num_anchors = resolution*resolution*5
    with self.test_session() as sess:
      sess.run(init_op)
      (box_encodings_shape,
       objectness_predictions_shape) = sess.run(
           [tf.shape(box_encodings), tf.shape(objectness_predictions)],
           feed_dict={image_features:
                      np.random.rand(4, resolution, resolution, 64)})
      self.assertAllEqual(box_encodings_shape, [4, expected_num_anchors, 4])
      self.assertAllEqual(objectness_predictions_shape,
                          [4, expected_num_anchors, 1])


if __name__ == '__main__':
  tf.test.main()
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/test
test
Taco2ModulationRemovalLayerPlugin_test
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "UnitTest.hpp"
#include "cudaMemory.h"
#include "taco2ModulationRemovalLayerPlugin.h"
#include "trtUtils.h"

#include "NvInfer.h"

#include <cfloat>
#include <random>
#include <vector>

using namespace nvinfer1;
using namespace nvinfer1::plugin;
using namespace tts;

/******************************************************************************
 * HELPER FUNCTIONS ***********************************************************
 *****************************************************************************/

namespace
{

template <typename RNG>
std::vector<float> genVec(const size_t size, RNG& rng)
{
    std::uniform_real_distribution<float> dist(-1.0, 1.0);
    std::vector<float> vec(size);
    for (size_t i = 0; i < size; ++i) {
        vec[i] = dist(rng);
    }

    return vec;
}

} // namespace

/******************************************************************************
 * UNIT TEST ******************************************************************
 *****************************************************************************/

TEST(CPUCompareTestBatch1)
{
    std::mt19937 rng(0);

    const int numFrames = 250;
    const int filterLength = 1024;
    const int hopLength = 256;
    const int inputLength = numFrames * hopLength;

    std::vector<float> weightsHost = genVec(filterLength, rng);
    std::fill(weightsHost.begin(), weightsHost.end(), 1.0f);

    Taco2ModulationRemovalLayerPlugin layer(
        TRTUtils::toWeights(weightsHost), inputLength, filterLength, hopLength);

    std::vector<float> inputHost = genVec(inputLength, rng);
    std::fill(inputHost.begin(), inputHost.end(), 1.0f);

    CudaMemory<float> inputDevice(inputHost);

    std::vector<Dims> inputDims{Dims3(1, 1, inputLength)};
    const std::vector<Dims> outputDims{Dims3(1, 1, inputLength)};
    const std::vector<DataType> dataTypes{DataType::kFLOAT};
    const bool broadcast[] = {false};

    layer.configurePlugin(
        inputDims.data(),
        static_cast<int>(inputDims.size()),
        outputDims.data(),
        static_cast<int>(outputDims.size()),
        dataTypes.data(),
        dataTypes.data(),
        broadcast,
        broadcast,
#if NV_TENSORRT_MAJOR < 6
        PluginFormat::kNCHW,
#else
        PluginFormat::kLINEAR,
#endif
        1);

    layer.initialize();

    std::vector<const float*> inputs{inputDevice.data()};

    CudaMemory<float> outputDevice(inputLength - filterLength);
    std::vector<float*> outputs{outputDevice.data()};

    layer.enqueue(
        1,
        reinterpret_cast<const void* const*>(inputs.data()),
        reinterpret_cast<void**>(outputs.data()),
        nullptr,
        0);
    CudaUtils::sync(0);

    // perform operations on cpu
    std::vector<float> windowSum(inputLength, 0);
    for (int i = 0; i < inputLength; i += hopLength) {
        for (int j = 0; j < filterLength; ++j) {
            const int idx = i + j;
            if (idx < inputLength) {
                windowSum[idx] += weightsHost[j];
            }
        }
    }

    std::vector<float> expOutput(inputLength, 0);
    for (int x = 0; x < inputLength; ++x) {
        float val = inputHost[x];
        if (windowSum[x] > FLT_MIN) {
            val /= windowSum[x];
        }
        val *= static_cast<float>(filterLength) / static_cast<float>(hopLength);
        expOutput[x] = val;
    }
    expOutput.erase(expOutput.begin(), expOutput.begin() + (filterLength / 2));
    expOutput.erase(expOutput.end() - (filterLength / 2), expOutput.end());

    // match outputs
    const std::vector<float> actOutput = outputDevice.toHost();
    ASSERT_EQ(expOutput.size(), actOutput.size());
    for (size_t i = 0; i < expOutput.size(); ++i) {
        EXPECT_NEAR(expOutput[i], actOutput[i], 1e-6) << "i = " << i;
    }
}

TEST(CPUCompareTestBatch4)
{
    std::mt19937 rng(0);

    const int batchSize = 2;
    const int numFrames = 250;
    const int filterLength = 1024;
    const int hopLength = 256;
    const int inputLength = numFrames * hopLength;

    std::vector<float> weightsHost = genVec(filterLength, rng);
    std::fill(weightsHost.begin(), weightsHost.end(), 1.0f);

    Taco2ModulationRemovalLayerPlugin layer(
        TRTUtils::toWeights(weightsHost), inputLength, filterLength, hopLength);

    std::vector<float> inputHost = genVec(batchSize * inputLength, rng);
    std::fill(inputHost.begin(), inputHost.end(), 1.0f);

    CudaMemory<float> inputDevice(inputHost);

    std::vector<Dims> inputDims{Dims3(1, 1, inputLength)};
    const std::vector<Dims> outputDims{Dims3(1, 1, inputLength)};
    const std::vector<DataType> dataTypes{DataType::kFLOAT};
    const bool broadcast[] = {false};

    layer.configurePlugin(
        inputDims.data(),
        static_cast<int>(inputDims.size()),
        outputDims.data(),
        static_cast<int>(outputDims.size()),
        dataTypes.data(),
        dataTypes.data(),
        broadcast,
        broadcast,
        PluginFormat::kLINEAR,
        batchSize);

    layer.initialize();

    std::vector<const float*> inputs{inputDevice.data()};

    CudaMemory<float> outputDevice((inputLength - filterLength) * batchSize);
    std::vector<float*> outputs{outputDevice.data()};

    layer.enqueue(
        batchSize,
        reinterpret_cast<const void* const*>(inputs.data()),
        reinterpret_cast<void**>(outputs.data()),
        nullptr,
        0);
    CudaUtils::sync(0);

    // perform operations on cpu
    std::vector<float> windowSum(inputLength, 0);
    for (int i = 0; i < inputLength; i += hopLength) {
        for (int j = 0; j < filterLength; ++j) {
            const int idx = i + j;
            if (idx < inputLength) {
                windowSum[idx] += weightsHost[j];
            }
        }
    }

    std::vector<float> expOutput(inputLength, 0);
    for (int x = 0; x < inputLength; ++x) {
        float val = inputHost[x];
        if (windowSum[x] > FLT_MIN) {
            val /= windowSum[x];
        }
        val *= static_cast<float>(filterLength) / static_cast<float>(hopLength);
        expOutput[x] = val;
    }
    expOutput.erase(expOutput.begin(), expOutput.begin() + (filterLength / 2));
    expOutput.erase(expOutput.end() - (filterLength / 2), expOutput.end());

    // match outputs -- across entire batch
    const std::vector<float> actOutput = outputDevice.toHost();
    ASSERT_EQ(expOutput.size() * batchSize, actOutput.size());
    for (int b = 0; b < batchSize; ++b) {
        for (size_t i = 0; i < expOutput.size(); ++i) {
            EXPECT_NEAR(expOutput[i], actOutput[i + b * expOutput.size()], 1e-6)
                << "i = " << i << ", b = " << b;
        }
    }
}
PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/src/trt/util
util
trtPtr
/*
 * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef TT2I_TRTPTR_H
#define TT2I_TRTPTR_H

#include <memory>

namespace tts
{

template <typename T>
class Destroyer
{
public:
    void operator()(T* const t)
    {
        t->destroy();
    }
};

template <typename T>
using TRTPtr = std::unique_ptr<T, Destroyer<T>>;

} // namespace tts

#endif
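For context, here is a minimal usage sketch (not part of the repository) of what TRTPtr is for: wrapping a raw TensorRT object so that its destroy() method runs automatically when the pointer leaves scope. nvinfer1::createInferBuilder and nvinfer1::ILogger are standard TensorRT APIs, but their exact signatures vary by TensorRT version, and the StderrLogger class below is a hypothetical stand-in for whatever logger the caller already uses.

#include "trtPtr.h"

#include "NvInfer.h"

#include <iostream>

namespace
{
// hypothetical logger; any nvinfer1::ILogger implementation would do here
class StderrLogger : public nvinfer1::ILogger
{
public:
    void log(Severity severity, const char* msg) noexcept override
    {
        if (severity <= Severity::kWARNING)
        {
            std::cerr << msg << std::endl;
        }
    }
};
} // namespace

int main()
{
    StderrLogger logger;

    // createInferBuilder() returns a raw IBuilder*; wrapping it in TRTPtr
    // means IBuilder::destroy() is invoked automatically at scope exit,
    // instead of relying on a manual destroy() call on every return path.
    tts::TRTPtr<nvinfer1::IBuilder> builder(nvinfer1::createInferBuilder(logger));

    return builder ? 0 : 1;
}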
TensorFlow/LanguageModeling/BERT/scripts
scripts
data_download
#!/usr/bin/env bash

# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

NV_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES:-"all"}

to_download=${1:-"wiki_only"} # By default, we don't download BooksCorpus dataset due to recent issues with the host server

docker run --gpus $NV_VISIBLE_DEVICES \
  --rm -it \
  --net=host \
  --shm-size=1g \
  --ulimit memlock=-1 \
  --ulimit stack=67108864 \
  -v $PWD:/workspace/bert \
  bert bash -c "bash data/create_datasets_from_start.sh ${to_download}"